#include <stdint.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/sysproto.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>
#include <string.h>
#include <sys/proc_info.h>
#include <sys/codesign.h>
#include <sys/pthread_shims.h>
#include <kern/locks.h>
#include <kern/clock.h>
#include <kern/policy_internal.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <libkern/libkern.h>
#include "net/net_str_id.h"
#include <mach/task.h>
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif
typedef int32_t qos_t;
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
#define KQ_EVENT NO_EVENT64
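/*
 * Locking overview (as implemented below):
 * - kq_lock is a spin lock guarding per-kqueue state (kqlock/kqunlock).
 * - Filters that may block use their own locks: the timer filter takes
 *   the _filt_timerlock mutex, EVFILT_USER state is guarded by the
 *   _filt_userlock spin lock, and proc filters use the proc klist lock.
 * - Individual knotes are protected by use counts; the
 *   kqlock2knoteuse/knoteuse2kqlock helpers below convert between
 *   holding the kq lock and holding a use reference on a knote.
 */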
static inline void kqlock(struct kqueue *kq);
static inline void kqunlock(struct kqueue *kq);
static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
static int kqlock2knotedetach(struct kqueue *kq, struct knote *kn);
static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int steal_drop);
static int kqueue_read(struct fileproc *fp, struct uio *uio,
int flags, vfs_context_t ctx);
static int kqueue_write(struct fileproc *fp, struct uio *uio,
int flags, vfs_context_t ctx);
static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data,
vfs_context_t ctx);
static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
vfs_context_t ctx);
static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
vfs_context_t ctx);
static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
static const struct fileops kqueueops = {
.fo_type = DTYPE_KQUEUE,
.fo_read = kqueue_read,
.fo_write = kqueue_write,
.fo_ioctl = kqueue_ioctl,
.fo_select = kqueue_select,
.fo_close = kqueue_close,
.fo_kqfilter = kqueue_kqfilter,
.fo_drain = kqueue_drain,
};
static int kevent_internal(struct proc *p, int fd,
user_addr_t changelist, int nchanges,
user_addr_t eventlist, int nevents,
user_addr_t data_out, uint64_t data_available,
unsigned int flags, user_addr_t utimeout,
kqueue_continue_t continuation,
int32_t *retval);
static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp,
struct proc *p, unsigned int flags);
static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp,
struct proc *p, unsigned int flags);
char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n);
static void kqueue_interrupt(struct kqueue *kq);
static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp,
void *data);
static void kevent_continue(struct kqueue *kq, void *data, int error);
static void kqueue_scan_continue(void *contp, wait_result_t wait_result);
static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data,
struct filt_process_s *process_data, kq_index_t servicer_qos_index,
int *countp, struct proc *p);
static int kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
static void kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags);
static struct kqtailq *kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index);
static struct kqtailq *kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index);
static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index);
static struct kqtailq *kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index);
static void kqworkq_request_thread(struct kqworkq *kqwq, kq_index_t qos_index);
static void kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index, uint32_t type);
static void kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index);
static void kqworkq_bind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
static void kqworkq_unbind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags);
static struct kqrequest *kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data,
struct filt_process_s *process_data, struct proc *p);
#if 0
static void knote_put(struct knote *kn);
#endif
static int knote_fdadd(struct knote *kn, struct proc *p);
static void knote_fdremove(struct knote *kn, struct proc *p);
static struct knote *knote_fdfind(struct kqueue *kq, struct kevent_internal_s *kev, struct proc *p);
static void knote_drop(struct knote *kn, struct proc *p);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);
static void knote_activate(struct knote *kn);
static void knote_deactivate(struct knote *kn);
static void knote_enable(struct knote *kn);
static void knote_disable(struct knote *kn);
static int knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static void knote_suppress(struct knote *kn);
static void knote_unsuppress(struct knote *kn);
static void knote_wakeup(struct knote *kn);
static kq_index_t knote_get_queue_index(struct knote *kn);
static struct kqtailq *knote_get_queue(struct knote *kn);
static struct kqtailq *knote_get_suppressed_queue(struct knote *kn);
static kq_index_t knote_get_req_index(struct knote *kn);
static kq_index_t knote_get_qos_index(struct knote *kn);
static void knote_set_qos_index(struct knote *kn, kq_index_t qos_index);
static kq_index_t knote_get_qos_override_index(struct knote *kn);
static void knote_set_qos_override_index(struct knote *kn, kq_index_t qos_index);
static int filt_fileattach(struct knote *kn);
static struct filterops file_filtops = {
.f_isfd = 1,
.f_attach = filt_fileattach,
};
static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static int filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static struct filterops kqread_filtops = {
.f_isfd = 1,
.f_detach = filt_kqdetach,
.f_event = filt_kqueue,
.f_touch = filt_kqtouch,
.f_process = filt_kqprocess,
};
static int filt_badattach(struct knote *kn);
static struct filterops bad_filtops = {
.f_attach = filt_badattach,
};
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static int filt_proctouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static struct filterops proc_filtops = {
.f_attach = filt_procattach,
.f_detach = filt_procdetach,
.f_event = filt_proc,
.f_touch = filt_proctouch,
.f_process = filt_procprocess,
};
#if CONFIG_MEMORYSTATUS
extern struct filterops memorystatus_filtops;
#endif
extern struct filterops fs_filtops;
extern struct filterops sig_filtops;
static int filt_timerattach(struct knote *kn);
static void filt_timerdetach(struct knote *kn);
static int filt_timer(struct knote *kn, long hint);
static int filt_timertouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_timerprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static struct filterops timer_filtops = {
.f_attach = filt_timerattach,
.f_detach = filt_timerdetach,
.f_event = filt_timer,
.f_touch = filt_timertouch,
.f_process = filt_timerprocess,
};
static void filt_timerexpire(void *knx, void *param1);
static int filt_timervalidate(struct knote *kn);
static void filt_timerupdate(struct knote *kn, int num_fired);
static void filt_timercancel(struct knote *kn);
#define TIMER_RUNNING 0x1
#define TIMER_CANCELWAIT 0x2
static lck_mtx_t _filt_timerlock;
static void filt_timerlock(void);
static void filt_timerunlock(void);
static zone_t knote_zone;
static zone_t kqfile_zone;
static zone_t kqworkq_zone;
#define KN_HASH(val, mask) (((val) ^ ((val) >> 8)) & (mask))
#if 0
extern struct filterops aio_filtops;
#endif
extern struct filterops machport_filtops;
static int filt_userattach(struct knote *kn);
static void filt_userdetach(struct knote *kn);
static int filt_user(struct knote *kn, long hint);
static int filt_usertouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_userprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static struct filterops user_filtops = {
.f_attach = filt_userattach,
.f_detach = filt_userdetach,
.f_event = filt_user,
.f_touch = filt_usertouch,
.f_process = filt_userprocess,
};
static lck_spin_t _filt_userlock;
static void filt_userlock(void);
static void filt_userunlock(void);
extern struct filterops pipe_rfiltops;
extern struct filterops pipe_wfiltops;
extern struct filterops ptsd_kqops;
extern struct filterops soread_filtops;
extern struct filterops sowrite_filtops;
extern struct filterops sock_filtops;
extern struct filterops soexcept_filtops;
extern struct filterops spec_filtops;
extern struct filterops bpfread_filtops;
extern struct filterops necp_fd_rfiltops;
extern struct filterops skywalk_channel_rfiltops;
extern struct filterops skywalk_channel_wfiltops;
extern struct filterops fsevent_filtops;
extern struct filterops vnode_filtops;
static struct filterops *sysfilt_ops[EVFILTID_MAX] = {
[~EVFILT_READ] = &file_filtops,
[~EVFILT_WRITE] = &file_filtops,
[~EVFILT_AIO] = &bad_filtops,
[~EVFILT_VNODE] = &file_filtops,
[~EVFILT_PROC] = &proc_filtops,
[~EVFILT_SIGNAL] = &sig_filtops,
[~EVFILT_TIMER] = &timer_filtops,
[~EVFILT_MACHPORT] = &machport_filtops,
[~EVFILT_FS] = &fs_filtops,
[~EVFILT_USER] = &user_filtops,
&bad_filtops, /* positional slot for filter -11 (unused) */
&bad_filtops, /* positional slot for filter -12 (EVFILT_VM, unsupported) */
[~EVFILT_SOCK] = &file_filtops,
#if CONFIG_MEMORYSTATUS
[~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
#else
[~EVFILT_MEMORYSTATUS] = &bad_filtops,
#endif
[~EVFILT_EXCEPT] = &file_filtops,
[EVFILTID_KQREAD] = &kqread_filtops,
[EVFILTID_PIPE_R] = &pipe_rfiltops,
[EVFILTID_PIPE_W] = &pipe_wfiltops,
[EVFILTID_PTSD] = &ptsd_kqops,
[EVFILTID_SOREAD] = &soread_filtops,
[EVFILTID_SOWRITE] = &sowrite_filtops,
[EVFILTID_SCK] = &sock_filtops,
[EVFILTID_SOEXCEPT] = &soexcept_filtops,
[EVFILTID_SPEC] = &spec_filtops,
[EVFILTID_BPFREAD] = &bpfread_filtops,
[EVFILTID_NECP_FD] = &necp_fd_rfiltops,
[EVFILTID_FSEVENT] = &fsevent_filtops,
[EVFILTID_VN] = &vnode_filtops
};
void waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos);
#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
#endif
#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
#endif
#ifndef _PTHREAD_PRIORITY_QOS_CLASS_MASK
#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00
#endif
#ifndef _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32
#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 8
#endif
static inline
qos_t canonicalize_kevent_qos(qos_t qos)
{
unsigned long canonical;
canonical = pthread_priority_canonicalize(qos, FALSE);
return (qos_t)canonical;
}
static inline
kq_index_t qos_index_from_qos(qos_t qos, boolean_t propagation)
{
kq_index_t qos_index;
unsigned long flags = 0;
qos_index = (kq_index_t)thread_qos_from_pthread_priority(
(unsigned long)qos, &flags);
if (!propagation && (flags & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG))
return KQWQ_QOS_MANAGER;
return qos_index;
}
static inline
qos_t qos_from_qos_index(kq_index_t qos_index)
{
if (qos_index == KQWQ_QOS_MANAGER)
return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
if (qos_index == 0)
return 0;
return (1 << (qos_index - 1 +
_PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32));
}
static inline
kq_index_t qos_index_for_servicer(int qos_class, thread_t thread, int flags)
{
kq_index_t qos_index;
if (flags & KEVENT_FLAG_WORKQ_MANAGER)
return KQWQ_QOS_MANAGER;
/* a specific QoS class is expected; fall back to the thread policy QoS on release builds */
assert(qos_class != -1);
if (qos_class == -1)
qos_index = proc_get_thread_policy(thread,
TASK_POLICY_ATTRIBUTE,
TASK_POLICY_QOS);
else
qos_index = (kq_index_t)qos_class;
assert(qos_index > 0 && qos_index < KQWQ_NQOS);
return qos_index;
}
lck_grp_attr_t * kq_lck_grp_attr;
lck_grp_t * kq_lck_grp;
lck_attr_t * kq_lck_attr;
static inline void
kqlock(struct kqueue *kq)
{
lck_spin_lock(&kq->kq_lock);
}
static inline void
kqunlock(struct kqueue *kq)
{
lck_spin_unlock(&kq->kq_lock);
}
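/*
 * kqlock2knoteuse - convert a held kq lock to a knote "use" reference.
 *
 * Fails (returns 0, kq still locked) if the knote is being dropped or
 * has vanished. On success the use count is bumped and the kq lock is
 * released; the caller may then invoke filter routines that can block.
 */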
static int
kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
{
if (kn->kn_status & (KN_DROPPING | KN_VANISHED))
return (0);
assert(kn->kn_status & KN_ATTACHED);
kn->kn_inuse++;
kqunlock(kq);
return (1);
}
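/*
 * knoteuse2kqlock - convert a knote "use" reference back to a held kq lock.
 *
 * Drops the use count; when it reaches zero, clears KN_ATTACHING and
 * wakes any thread parked in KN_USEWAIT. If a concurrent drop is in
 * progress, the caller may steal it (steal_drop) by marking the knote
 * KN_STOLENDROP; otherwise the function returns 0 to signal that the
 * knote is going away. The kq lock is held on return either way.
 */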
static int
knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int steal_drop)
{
int dropped = 0;
kqlock(kq);
if (--kn->kn_inuse == 0) {
if ((kn->kn_status & KN_ATTACHING) != 0) {
kn->kn_status &= ~KN_ATTACHING;
}
if ((kn->kn_status & KN_USEWAIT) != 0) {
wait_result_t result;
if (kn->kn_status & KN_DROPPING) {
if (steal_drop && !(kn->kn_status & KN_STOLENDROP)) {
kn->kn_status |= KN_STOLENDROP;
} else {
dropped = 1;
}
}
result = (kn->kn_status & KN_STOLENDROP) ?
THREAD_RESTART : THREAD_AWAKENED;
kn->kn_status &= ~KN_USEWAIT;
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
result,
WAITQ_ALL_PRIORITIES);
} else {
assert((kn->kn_status & (KN_DROPPING|KN_STOLENDROP)) == 0);
}
} else if (kn->kn_status & KN_DROPPING) {
if (steal_drop && ((kn->kn_status & KN_STOLENDROP) == 0)) {
kn->kn_status |= KN_STOLENDROP;
kn->kn_status |= KN_USEWAIT;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
kqlock(kq);
} else {
dropped = 1;
}
}
return (!dropped);
}
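/*
 * kqlock2knotedetach - convert a kq lock to a "detaching" knote reference.
 *
 * If the knote is being dropped or is still in use, wait for that
 * activity to settle and return 0 with the kq unlocked. Otherwise mark
 * the knote vanished, take a use reference, and return 1 (kq unlocked).
 */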
static int
kqlock2knotedetach(struct kqueue *kq, struct knote *kn)
{
if ((kn->kn_status & KN_DROPPING) || kn->kn_inuse) {
kn->kn_status |= KN_USEWAIT;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
return (0);
}
assert((kn->kn_status & KN_VANISHED) == 0);
assert(kn->kn_status & KN_ATTACHED);
kn->kn_status &= ~KN_ATTACHED;
kn->kn_status |= KN_VANISHED;
kn->kn_inuse++;
kqunlock(kq);
return (1);
}
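/*
 * kqlock2knotedrop - convert a kq lock to a knote drop reference.
 *
 * Sets KN_DROPPING, unsuppresses and dequeues the knote, then returns
 * nonzero if the caller owns the drop (waiting out any in-flight uses
 * first). Returns 0 if another thread already owned the drop; in that
 * case this thread has waited for the drop to complete. The kq lock is
 * released in all cases.
 */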
static int
kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
{
int oktodrop;
wait_result_t result;
oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0);
kn->kn_status |= KN_DROPPING;
knote_unsuppress(kn);
knote_dequeue(kn);
if (oktodrop) {
if (kn->kn_inuse == 0) {
kqunlock(kq);
return (oktodrop);
}
}
kn->kn_status |= KN_USEWAIT;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
result = thread_block(THREAD_CONTINUE_NULL);
return (result == THREAD_AWAKENED);
}
#if 0
static void
knote_put(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
if (--kn->kn_inuse == 0) {
if ((kn->kn_status & KN_USEWAIT) != 0) {
kn->kn_status &= ~KN_USEWAIT;
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_AWAKENED,
WAITQ_ALL_PRIORITIES);
}
}
kqunlock(kq);
}
#endif
static int
filt_fileattach(struct knote *kn)
{
return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current()));
}
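/* Shorthands for fileproc fields that live in the shared fileglob. */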
#define f_flag f_fglob->fg_flag
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data
static void
filt_kqdetach(struct knote *kn)
{
struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
struct kqueue *kq = &kqf->kqf_kqueue;
kqlock(kq);
KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
kqunlock(kq);
}
static int
filt_kqueue(struct knote *kn, __unused long hint)
{
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
int count;
count = kq->kq_count;
return (count > 0);
}
static int
filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev)
{
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
int res;
kqlock(kq);
kn->kn_data = kq->kq_count;
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
res = (kn->kn_data > 0);
kqunlock(kq);
return res;
}
static int
filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
int res;
kqlock(kq);
kn->kn_data = kq->kq_count;
res = (kn->kn_data > 0);
if (res) {
*kev = kn->kn_kevent;
if (kn->kn_flags & EV_CLEAR)
kn->kn_data = 0;
}
kqunlock(kq);
return res;
}
static int
filt_procattach(struct knote *kn)
{
struct proc *p;
assert(PID_MAX < NOTE_PDATAMASK);
if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
kn->kn_flags = EV_ERROR;
kn->kn_data = ENOTSUP;
return 0;
}
p = proc_find(kn->kn_id);
if (p == NULL) {
kn->kn_flags = EV_ERROR;
kn->kn_data = ESRCH;
return 0;
}
/* NOTE_EXITSTATUS is restricted to the parent (or ptrace parent) of the target */
const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
do {
pid_t selfpid = proc_selfpid();
if (p->p_ppid == selfpid)
break;
if ((p->p_lflag & P_LTRACED) != 0 &&
(p->p_oppid == selfpid))
break;
proc_rele(p);
kn->kn_flags = EV_ERROR;
kn->kn_data = EACCES;
return 0;
} while (0);
proc_klist_lock();
kn->kn_ptr.p_proc = p;
KNOTE_ATTACH(&p->p_klist, kn);
proc_klist_unlock();
proc_rele(p);
return (0);
}
static void
filt_procdetach(struct knote *kn)
{
struct proc *p;
proc_klist_lock();
p = kn->kn_ptr.p_proc;
if (p != PROC_NULL) {
kn->kn_ptr.p_proc = PROC_NULL;
KNOTE_DETACH(&p->p_klist, kn);
}
proc_klist_unlock();
}
static int
filt_proc(struct knote *kn, long hint)
{
u_int event;
event = (u_int)hint & NOTE_PCTRLMASK;
if (event & NOTE_EXIT) {
/*
 * While reparented under a debugger, exit notifications are
 * delivered only to the tracing parent (p_oppid tracks the
 * original parent); others are quashed.
 */
if ((kn->kn_ptr.p_proc->p_oppid != 0)
&& (knote_get_kq(kn)->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) {
return 0;
}
}
if (kn->kn_sfflags & event)
kn->kn_fflags |= event;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
kn->kn_flags |= (EV_EOF | EV_ONESHOT);
}
#pragma clang diagnostic pop
if (event == NOTE_EXIT) {
kn->kn_data = 0;
if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
kn->kn_fflags |= NOTE_EXITSTATUS;
kn->kn_data |= (hint & NOTE_PDATAMASK);
}
if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
kn->kn_fflags |= NOTE_EXIT_DETAIL;
if ((kn->kn_ptr.p_proc->p_lflag &
P_LTERM_DECRYPTFAIL) != 0) {
kn->kn_data |= NOTE_EXIT_DECRYPTFAIL;
}
if ((kn->kn_ptr.p_proc->p_lflag &
P_LTERM_JETSAM) != 0) {
kn->kn_data |= NOTE_EXIT_MEMORY;
switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) {
case P_JETSAM_VMPAGESHORTAGE:
kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
break;
case P_JETSAM_VMTHRASHING:
kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING;
break;
case P_JETSAM_FCTHRASHING:
kn->kn_data |= NOTE_EXIT_MEMORY_FCTHRASHING;
break;
case P_JETSAM_VNODE:
kn->kn_data |= NOTE_EXIT_MEMORY_VNODE;
break;
case P_JETSAM_HIWAT:
kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT;
break;
case P_JETSAM_PID:
kn->kn_data |= NOTE_EXIT_MEMORY_PID;
break;
case P_JETSAM_IDLEEXIT:
kn->kn_data |= NOTE_EXIT_MEMORY_IDLE;
break;
}
}
if ((kn->kn_ptr.p_proc->p_csflags &
CS_KILLED) != 0) {
kn->kn_data |= NOTE_EXIT_CSERROR;
}
}
}
return (kn->kn_fflags != 0);
}
static int
filt_proctouch(struct knote *kn, struct kevent_internal_s *kev)
{
int res;
proc_klist_lock();
kn->kn_sfflags = kev->fflags;
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
res = (kn->kn_fflags != 0);
proc_klist_unlock();
return res;
}
static int
filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
int res;
proc_klist_lock();
res = (kn->kn_fflags != 0);
if (res) {
*kev = kn->kn_kevent;
kn->kn_flags |= EV_CLEAR;
kn->kn_fflags = 0;
kn->kn_data = 0;
}
proc_klist_unlock();
return res;
}
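/*
 * filt_timervalidate - convert user-supplied timer values for a knote.
 *
 * Converts kn_sdata (and the kn_ext[1] leeway, if NOTE_LEEWAY) from the
 * unit selected by NOTE_SECONDS/NOTE_USECONDS/NOTE_NSECONDS (default:
 * milliseconds) into mach absolute time. For NOTE_ABSOLUTE timers the
 * deadline lands in kn_ext[0] (an already-past deadline leaves it 0);
 * for interval timers the interval is stored back in kn_sdata.
 * Returns EINVAL on conflicting unit flags.
 * Called with the timer filter lock held.
 */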
static int
filt_timervalidate(struct knote *kn)
{
uint64_t multiplier;
uint64_t raw = 0;
switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) {
case NOTE_SECONDS:
multiplier = NSEC_PER_SEC;
break;
case NOTE_USECONDS:
multiplier = NSEC_PER_USEC;
break;
case NOTE_NSECONDS:
multiplier = 1;
break;
case 0: /* default: milliseconds */
multiplier = NSEC_PER_SEC / 1000;
break;
default:
return (EINVAL);
}
if (kn->kn_sfflags & NOTE_LEEWAY) {
nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw);
kn->kn_ext[1] = raw;
}
nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw);
kn->kn_ext[0] = 0;
kn->kn_sdata = 0;
if (kn->kn_sfflags & NOTE_ABSOLUTE) {
clock_sec_t seconds;
clock_nsec_t nanoseconds;
uint64_t now;
clock_get_calendar_nanotime(&seconds, &nanoseconds);
nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC +
nanoseconds, &now);
if (now < raw) {
raw -= now;
if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
clock_continuoustime_interval_to_deadline(raw,
&kn->kn_ext[0]);
} else {
clock_absolutetime_interval_to_deadline(raw,
&kn->kn_ext[0]);
}
}
} else {
kn->kn_sdata = raw;
}
return (0);
}
static void
filt_timerupdate(struct knote *kn, int num_fired)
{
assert(num_fired > 0);
if (kn->kn_sdata == 0)
return;
if (kn->kn_ext[0] == 0) {
assert(num_fired == 1);
if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
clock_continuoustime_interval_to_deadline(kn->kn_sdata,
&kn->kn_ext[0]);
} else {
clock_absolutetime_interval_to_deadline(kn->kn_sdata,
&kn->kn_ext[0]);
}
} else {
kn->kn_ext[0] += (kn->kn_sdata * num_fired);
}
}
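/*
 * filt_timerexpire - thread call callout fired for a timer knote.
 *
 * Timer knotes are not attached to any object's klist, so a temporary
 * single-entry list is built here just to deliver KNOTE() to this one
 * knote. Also wakes anyone blocked in filt_timercancel() waiting for
 * the callout to finish.
 */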
static void
filt_timerexpire(void *knx, __unused void *spare)
{
struct klist timer_list;
struct knote *kn = knx;
filt_timerlock();
kn->kn_hookid &= ~TIMER_RUNNING;
SLIST_INIT(&timer_list);
SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext);
KNOTE(&timer_list, 1);
if (kn->kn_hookid & TIMER_CANCELWAIT) {
struct kqueue *kq = knote_get_kq(kn);
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_hook),
THREAD_AWAKENED,
WAITQ_ALL_PRIORITIES);
}
filt_timerunlock();
}
static void
filt_timercancel(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
thread_call_t callout = kn->kn_hook;
boolean_t cancelled;
if (kn->kn_hookid & TIMER_RUNNING) {
cancelled = thread_call_cancel(callout);
if (cancelled) {
kn->kn_hookid &= ~TIMER_RUNNING;
} else {
kn->kn_hookid |= TIMER_CANCELWAIT;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_hook),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
filt_timerunlock();
thread_block(THREAD_CONTINUE_NULL);
filt_timerlock();
assert((kn->kn_hookid & TIMER_RUNNING) == 0);
}
}
}
static int
filt_timerattach(struct knote *kn)
{
thread_call_t callout;
int error;
int res;
callout = thread_call_allocate(filt_timerexpire, kn);
if (NULL == callout) {
kn->kn_flags = EV_ERROR;
kn->kn_data = ENOMEM;
return 0;
}
filt_timerlock();
error = filt_timervalidate(kn);
if (error != 0) {
filt_timerunlock();
thread_call_free(callout);
kn->kn_flags = EV_ERROR;
kn->kn_data = error;
return 0;
}
kn->kn_hook = (void*)callout;
kn->kn_hookid = 0;
if (kn->kn_sfflags & NOTE_ABSOLUTE)
kn->kn_flags |= EV_ONESHOT;
filt_timerupdate(kn, 1);
if (kn->kn_ext[0]) {
kn->kn_flags |= EV_CLEAR;
unsigned int timer_flags = 0;
if (kn->kn_sfflags & NOTE_CRITICAL)
timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
else if (kn->kn_sfflags & NOTE_BACKGROUND)
timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
else
timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
if (kn->kn_sfflags & NOTE_LEEWAY)
timer_flags |= THREAD_CALL_DELAY_LEEWAY;
if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME)
timer_flags |= THREAD_CALL_CONTINUOUS;
thread_call_enter_delayed_with_leeway(callout, NULL,
kn->kn_ext[0], kn->kn_ext[1], timer_flags);
kn->kn_hookid |= TIMER_RUNNING;
} else {
kn->kn_data = 1;
}
res = (kn->kn_data > 0);
filt_timerunlock();
return res;
}
static void
filt_timerdetach(struct knote *kn)
{
thread_call_t callout;
filt_timerlock();
callout = (thread_call_t)kn->kn_hook;
filt_timercancel(kn);
filt_timerunlock();
thread_call_free(callout);
}
static int
filt_timer_num_fired(struct knote *kn)
{
int num_fired = 1;
if ((kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) &&
(kn->kn_sdata != 0) &&
(kn->kn_ext[0] != 0))
{
const uint64_t now = mach_continuous_time();
assert(now >= kn->kn_ext[0]);
assert(kn->kn_sdata > 0);
const uint64_t overrun_ticks = now - kn->kn_ext[0];
const uint64_t kn_sdata = kn->kn_sdata;
if (overrun_ticks < kn_sdata) {
num_fired = 1;
} else if (overrun_ticks < (kn_sdata << 1)) {
num_fired = 2;
} else {
num_fired = (overrun_ticks / kn_sdata) + 1;
}
}
return num_fired;
}
static int
filt_timer(
struct knote *kn,
long hint)
{
#pragma unused(hint)
int num_fired = filt_timer_num_fired(kn);
kn->kn_data += num_fired;
if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) &&
((kn->kn_flags & EV_ONESHOT) == 0)) {
filt_timerupdate(kn, num_fired);
if (kn->kn_ext[0]) {
unsigned int timer_flags = 0;
if (kn->kn_sfflags & NOTE_CRITICAL)
timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
else if (kn->kn_sfflags & NOTE_BACKGROUND)
timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
else
timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
if (kn->kn_sfflags & NOTE_LEEWAY)
timer_flags |= THREAD_CALL_DELAY_LEEWAY;
thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
kn->kn_ext[0], kn->kn_ext[1], timer_flags);
kn->kn_hookid |= TIMER_RUNNING;
}
}
return (1);
}
static int
filt_timertouch(
struct knote *kn,
struct kevent_internal_s *kev)
{
int error;
int res;
filt_timerlock();
filt_timercancel(kn);
kn->kn_sdata = kev->data;
kn->kn_sfflags = kev->fflags;
kn->kn_ext[0] = kev->ext[0];
kn->kn_ext[1] = kev->ext[1];
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
error = filt_timervalidate(kn);
if (error) {
filt_timerunlock();
kn->kn_flags |= EV_ERROR;
kn->kn_data = error;
return 1;
}
filt_timerupdate(kn, 1);
if (kn->kn_ext[0]) {
unsigned int timer_flags = 0;
if (kn->kn_sfflags & NOTE_CRITICAL)
timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
else if (kn->kn_sfflags & NOTE_BACKGROUND)
timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
else
timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
if (kn->kn_sfflags & NOTE_LEEWAY)
timer_flags |= THREAD_CALL_DELAY_LEEWAY;
thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL,
kn->kn_ext[0], kn->kn_ext[1], timer_flags);
kn->kn_hookid |= TIMER_RUNNING;
} else {
kn->kn_data = 1;
}
res = (kn->kn_data > 0);
filt_timerunlock();
return res;
}
static int
filt_timerprocess(
struct knote *kn,
__unused struct filt_process_s *data,
struct kevent_internal_s *kev)
{
filt_timerlock();
if (kn->kn_data == 0) {
filt_timerunlock();
return 0;
}
*kev = kn->kn_kevent;
kev->ext[0] = 0;
kn->kn_data = 0;
if (kn->kn_flags & EV_CLEAR)
kn->kn_fflags = 0;
filt_timerunlock();
return 1;
}
static void
filt_timerlock(void)
{
lck_mtx_lock(&_filt_timerlock);
}
static void
filt_timerunlock(void)
{
lck_mtx_unlock(&_filt_timerlock);
}
static void
filt_userlock(void)
{
lck_spin_lock(&_filt_userlock);
}
static void
filt_userunlock(void)
{
lck_spin_unlock(&_filt_userlock);
}
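/*
 * EVFILT_USER - user-triggered events.
 *
 * kn_hookid doubles as the "triggered" flag: it is set by NOTE_TRIGGER
 * in an attach or touch, and cleared on delivery when EV_CLEAR is set.
 * Illustrative userspace usage (not part of this file):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);	// register ident 1
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);	// fire it
 */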
static int
filt_userattach(struct knote *kn)
{
kn->kn_hook = NULL;
if (kn->kn_fflags & NOTE_TRIGGER) {
kn->kn_hookid = 1;
} else {
kn->kn_hookid = 0;
}
return (kn->kn_hookid);
}
static void
filt_userdetach(__unused struct knote *kn)
{
}
static int
filt_user(
__unused struct knote *kn,
__unused long hint)
{
panic("filt_user");
return 0;
}
static int
filt_usertouch(
struct knote *kn,
struct kevent_internal_s *kev)
{
uint32_t ffctrl;
int fflags;
int active;
filt_userlock();
ffctrl = kev->fflags & NOTE_FFCTRLMASK;
fflags = kev->fflags & NOTE_FFLAGSMASK;
switch (ffctrl) {
case NOTE_FFNOP:
break;
case NOTE_FFAND:
kn->kn_sfflags &= fflags;
break;
case NOTE_FFOR:
kn->kn_sfflags |= fflags;
break;
case NOTE_FFCOPY:
kn->kn_sfflags = fflags;
break;
}
kn->kn_sdata = kev->data;
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
if (kev->fflags & NOTE_TRIGGER) {
kn->kn_hookid = 1;
}
active = kn->kn_hookid;
filt_userunlock();
return (active);
}
static int
filt_userprocess(
struct knote *kn,
__unused struct filt_process_s *data,
struct kevent_internal_s *kev)
{
filt_userlock();
if (kn->kn_hookid == 0) {
filt_userunlock();
return 0;
}
*kev = kn->kn_kevent;
kev->fflags = (uint32_t)kn->kn_sfflags;
kev->data = kn->kn_sdata;
if (kn->kn_flags & EV_CLEAR) {
kn->kn_hookid = 0;
kn->kn_data = 0;
kn->kn_fflags = 0;
}
filt_userunlock();
return 1;
}
static int
filt_badattach(__unused struct knote *kn)
{
kn->kn_flags |= EV_ERROR;
kn->kn_data = ENOTSUP;
return 0;
}
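/*
 * kqueue_alloc - allocate a kqueue.
 *
 * With KEVENT_FLAG_WORKQ, a per-process workq kqueue (struct kqworkq)
 * with one queue bucket per QoS level is created; otherwise a plain
 * file-backed kqueue (struct kqfile) with a single queue. Also lazily
 * initializes the process's knote list size under the fd lock.
 */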
struct kqueue *
kqueue_alloc(struct proc *p, unsigned int flags)
{
struct filedesc *fdp = p->p_fd;
struct kqueue *kq = NULL;
int policy;
void *hook;
uint64_t kq_addr_offset;
if (flags & KEVENT_FLAG_WORKQ) {
struct kqworkq *kqwq;
int i;
kqwq = (struct kqworkq *)zalloc(kqworkq_zone);
if (kqwq == NULL)
return NULL;
kq = &kqwq->kqwq_kqueue;
bzero(kqwq, sizeof (struct kqworkq));
kqwq->kqwq_state = KQ_WORKQ;
for (i = 0; i < KQWQ_NBUCKETS; i++) {
TAILQ_INIT(&kq->kq_queue[i]);
}
for (i = 0; i < KQWQ_NQOS; i++) {
TAILQ_INIT(&kqwq->kqwq_request[i].kqr_suppressed);
}
lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr);
policy = SYNC_POLICY_FIFO;
hook = (void *)kqwq;
} else {
struct kqfile *kqf;
kqf = (struct kqfile *)zalloc(kqfile_zone);
if (kqf == NULL)
return NULL;
kq = &kqf->kqf_kqueue;
bzero(kqf, sizeof (struct kqfile));
TAILQ_INIT(&kq->kq_queue[0]);
TAILQ_INIT(&kqf->kqf_suppressed);
policy = SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST;
hook = NULL;
}
waitq_set_init(&kq->kq_wqs, policy, NULL, hook);
lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
kq->kq_p = p;
if (fdp->fd_knlistsize < 0) {
proc_fdlock(p);
if (fdp->fd_knlistsize < 0)
fdp->fd_knlistsize = 0;
proc_fdunlock(p);
}
kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS);
assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE));
return (kq);
}
void
kqueue_dealloc(struct kqueue *kq)
{
struct proc *p;
struct filedesc *fdp;
struct knote *kn;
int i;
if (kq == NULL)
return;
p = kq->kq_p;
fdp = p->p_fd;
proc_fdlock(p);
for (i = 0; i < fdp->fd_knlistsize; i++) {
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
while (kn != NULL) {
if (kq == knote_get_kq(kn)) {
kqlock(kq);
proc_fdunlock(p);
if (kqlock2knotedrop(kq, kn)) {
knote_drop(kn, p);
}
proc_fdlock(p);
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
continue;
}
kn = SLIST_NEXT(kn, kn_link);
}
}
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
while (kn != NULL) {
if (kq == knote_get_kq(kn)) {
kqlock(kq);
proc_fdunlock(p);
if (kqlock2knotedrop(kq, kn)) {
knote_drop(kn, p);
}
proc_fdlock(p);
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
continue;
}
kn = SLIST_NEXT(kn, kn_link);
}
}
}
proc_fdunlock(p);
waitq_set_deinit(&kq->kq_wqs);
lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp);
zfree(kqworkq_zone, kqwq);
} else {
struct kqfile *kqf = (struct kqfile *)kq;
zfree(kqfile_zone, kqf);
}
}
int
kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval)
{
struct kqueue *kq;
struct fileproc *fp;
int fd, error;
error = falloc_withalloc(p,
&fp, &fd, vfs_context_current(), fp_zalloc, cra);
if (error) {
return (error);
}
kq = kqueue_alloc(p, 0);
if (kq == NULL) {
fp_free(p, fd, fp);
return (ENOMEM);
}
fp->f_flag = FREAD | FWRITE;
fp->f_ops = &kqueueops;
fp->f_data = kq;
proc_fdlock(p);
*fdflags(p, fd) |= UF_EXCLOSE;
procfdtbl_releasefd(p, fd, NULL);
fp_drop(p, fd, fp, 1);
proc_fdunlock(p);
*retval = fd;
return (error);
}
int
kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
{
return (kqueue_body(p, fileproc_alloc_init, NULL, retval));
}
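/*
 * kevent_copyin - copy in the next change entry from user space.
 *
 * Decodes one of three ABI layouts into a kevent_internal_s: the legacy
 * kevent() layout (sized per the process's 32/64-bit ABI), the
 * kevent64() layout, or the kevent_qos() layout. On success, *addrp is
 * advanced past the consumed entry.
 */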
static int
kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p,
unsigned int flags)
{
int advance;
int error;
if (flags & KEVENT_FLAG_LEGACY32) {
bzero(kevp, sizeof (*kevp));
if (IS_64BIT_PROCESS(p)) {
struct user64_kevent kev64;
advance = sizeof (kev64);
error = copyin(*addrp, (caddr_t)&kev64, advance);
if (error)
return (error);
kevp->ident = kev64.ident;
kevp->filter = kev64.filter;
kevp->flags = kev64.flags;
kevp->udata = kev64.udata;
kevp->fflags = kev64.fflags;
kevp->data = kev64.data;
} else {
struct user32_kevent kev32;
advance = sizeof (kev32);
error = copyin(*addrp, (caddr_t)&kev32, advance);
if (error)
return (error);
kevp->ident = (uintptr_t)kev32.ident;
kevp->filter = kev32.filter;
kevp->flags = kev32.flags;
kevp->udata = CAST_USER_ADDR_T(kev32.udata);
kevp->fflags = kev32.fflags;
kevp->data = (intptr_t)kev32.data;
}
} else if (flags & KEVENT_FLAG_LEGACY64) {
struct kevent64_s kev64;
bzero(kevp, sizeof (*kevp));
advance = sizeof (struct kevent64_s);
error = copyin(*addrp, (caddr_t)&kev64, advance);
if (error)
return(error);
kevp->ident = kev64.ident;
kevp->filter = kev64.filter;
kevp->flags = kev64.flags;
kevp->udata = kev64.udata;
kevp->fflags = kev64.fflags;
kevp->data = kev64.data;
kevp->ext[0] = kev64.ext[0];
kevp->ext[1] = kev64.ext[1];
} else {
struct kevent_qos_s kevqos;
bzero(kevp, sizeof (*kevp));
advance = sizeof (struct kevent_qos_s);
error = copyin(*addrp, (caddr_t)&kevqos, advance);
if (error)
return error;
kevp->ident = kevqos.ident;
kevp->filter = kevqos.filter;
kevp->flags = kevqos.flags;
kevp->qos = kevqos.qos;
kevp->udata = kevqos.udata;
kevp->fflags = kevqos.fflags;
kevp->data = kevqos.data;
kevp->ext[0] = kevqos.ext[0];
kevp->ext[1] = kevqos.ext[1];
kevp->ext[2] = kevqos.ext[2];
kevp->ext[3] = kevqos.ext[3];
}
if (!error)
*addrp += advance;
return (error);
}
static int
kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p,
unsigned int flags)
{
user_addr_t addr = *addrp;
int advance;
int error;
if (flags & KEVENT_FLAG_LEGACY32) {
assert((flags & KEVENT_FLAG_STACK_EVENTS) == 0);
if (IS_64BIT_PROCESS(p)) {
struct user64_kevent kev64;
advance = sizeof (kev64);
bzero(&kev64, advance);
kev64.ident = (kevp->ident == (uintptr_t)-1) ?
(uint64_t)-1LL : (uint64_t)kevp->ident;
kev64.filter = kevp->filter;
kev64.flags = kevp->flags;
kev64.fflags = kevp->fflags;
kev64.data = (int64_t) kevp->data;
kev64.udata = kevp->udata;
error = copyout((caddr_t)&kev64, addr, advance);
} else {
struct user32_kevent kev32;
advance = sizeof (kev32);
bzero(&kev32, advance);
kev32.ident = (uint32_t)kevp->ident;
kev32.filter = kevp->filter;
kev32.flags = kevp->flags;
kev32.fflags = kevp->fflags;
kev32.data = (int32_t)kevp->data;
kev32.udata = kevp->udata;
error = copyout((caddr_t)&kev32, addr, advance);
}
} else if (flags & KEVENT_FLAG_LEGACY64) {
struct kevent64_s kev64;
advance = sizeof (struct kevent64_s);
if (flags & KEVENT_FLAG_STACK_EVENTS) {
addr -= advance;
}
bzero(&kev64, advance);
kev64.ident = kevp->ident;
kev64.filter = kevp->filter;
kev64.flags = kevp->flags;
kev64.fflags = kevp->fflags;
kev64.data = (int64_t) kevp->data;
kev64.udata = kevp->udata;
kev64.ext[0] = kevp->ext[0];
kev64.ext[1] = kevp->ext[1];
error = copyout((caddr_t)&kev64, addr, advance);
} else {
struct kevent_qos_s kevqos;
advance = sizeof (struct kevent_qos_s);
if (flags & KEVENT_FLAG_STACK_EVENTS) {
addr -= advance;
}
bzero(&kevqos, advance);
kevqos.ident = kevp->ident;
kevqos.filter = kevp->filter;
kevqos.flags = kevp->flags;
kevqos.qos = kevp->qos;
kevqos.udata = kevp->udata;
kevqos.fflags = kevp->fflags;
kevqos.xflags = 0;
kevqos.data = (int64_t) kevp->data;
kevqos.ext[0] = kevp->ext[0];
kevqos.ext[1] = kevp->ext[1];
kevqos.ext[2] = kevp->ext[2];
kevqos.ext[3] = kevp->ext[3];
error = copyout((caddr_t)&kevqos, addr, advance);
}
if (!error) {
if (flags & KEVENT_FLAG_STACK_EVENTS)
*addrp = addr;
else
*addrp = addr + advance;
}
return (error);
}
static int
kevent_get_data_size(struct proc *p,
uint64_t data_available,
unsigned int flags,
user_size_t *residp)
{
user_size_t resid;
int error = 0;
if (data_available != USER_ADDR_NULL) {
if (flags & KEVENT_FLAG_KERNEL) {
resid = *(user_size_t *)(uintptr_t)data_available;
} else if (IS_64BIT_PROCESS(p)) {
user64_size_t usize;
error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
resid = (user_size_t)usize;
} else {
user32_size_t usize;
error = copyin((user_addr_t)data_available, &usize, sizeof(usize));
resid = (user_size_t)usize;
}
if (error)
return(error);
} else {
resid = 0;
}
*residp = resid;
return 0;
}
static int
kevent_put_data_size(struct proc *p,
uint64_t data_available,
unsigned int flags,
user_size_t resid)
{
int error = 0;
if (data_available) {
if (flags & KEVENT_FLAG_KERNEL) {
*(user_size_t *)(uintptr_t)data_available = resid;
} else if (IS_64BIT_PROCESS(p)) {
user64_size_t usize = (user64_size_t)resid;
error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
} else {
user32_size_t usize = (user32_size_t)resid;
error = copyout(&usize, (user_addr_t)data_available, sizeof(usize));
}
}
return error;
}
__attribute__((noreturn))
static void
kevent_continue(__unused struct kqueue *kq, void *data, int error)
{
struct _kevent *cont_args;
struct fileproc *fp;
uint64_t data_available;
user_size_t data_size;
user_size_t data_resid;
unsigned int flags;
int32_t *retval;
int noutputs;
int fd;
struct proc *p = current_proc();
cont_args = (struct _kevent *)data;
data_available = cont_args->data_available;
flags = cont_args->process_data.fp_flags;
data_size = cont_args->process_data.fp_data_size;
data_resid = cont_args->process_data.fp_data_resid;
noutputs = cont_args->eventout;
retval = cont_args->retval;
fd = cont_args->fd;
fp = cont_args->fp;
if (fp != NULL)
fp_drop(p, fd, fp, 0);
if (error == 0 && data_available && data_resid != data_size) {
(void)kevent_put_data_size(p, data_available, flags, data_resid);
}
if (error == ERESTART)
error = EINTR;
else if (error == EWOULDBLOCK)
error = 0;
if (error == 0)
*retval = noutputs;
unix_syscall_return(error);
}
int
kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
{
unsigned int flags = KEVENT_FLAG_LEGACY32;
return kevent_internal(p,
uap->fd,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
0ULL, 0ULL,
flags,
uap->timeout,
kevent_continue,
retval);
}
int
kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
{
unsigned int flags;
flags = uap->flags & KEVENT_FLAG_USER;
flags |= KEVENT_FLAG_LEGACY64;
return kevent_internal(p,
uap->fd,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
0ULL, 0ULL,
flags,
uap->timeout,
kevent_continue,
retval);
}
int
kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
{
uap->flags &= KEVENT_FLAG_USER;
return kevent_internal(p,
uap->fd,
uap->changelist, uap->nchanges,
uap->eventlist, uap->nevents,
uap->data_out, (uint64_t)uap->data_available,
uap->flags,
0ULL,
kevent_continue,
retval);
}
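/*
 * kevent_qos_internal - in-kernel entry point for kevent_qos.
 *
 * KEVENT_FLAG_KERNEL is added so that data_available is treated as a
 * kernel pointer rather than a user address; no continuation is
 * supplied, so the call completes inline instead of returning through
 * kevent_continue().
 */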
int
kevent_qos_internal(struct proc *p, int fd,
user_addr_t changelist, int nchanges,
user_addr_t eventlist, int nevents,
user_addr_t data_out, user_size_t *data_available,
unsigned int flags,
int32_t *retval)
{
return kevent_internal(p,
fd,
changelist, nchanges,
eventlist, nevents,
data_out, (uint64_t)data_available,
(flags | KEVENT_FLAG_KERNEL),
0ULL,
NULL,
retval);
}
static int
kevent_get_timeout(struct proc *p,
user_addr_t utimeout,
unsigned int flags,
struct timeval *atvp)
{
struct timeval atv;
int error = 0;
if (flags & KEVENT_FLAG_IMMEDIATE) {
getmicrouptime(&atv);
} else if (utimeout != USER_ADDR_NULL) {
struct timeval rtv;
if (flags & KEVENT_FLAG_KERNEL) {
struct timespec *tsp = (struct timespec *)utimeout;
TIMESPEC_TO_TIMEVAL(&rtv, tsp);
} else if (IS_64BIT_PROCESS(p)) {
struct user64_timespec ts;
error = copyin(utimeout, &ts, sizeof(ts));
if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0)
error = EINVAL;
else
TIMESPEC_TO_TIMEVAL(&rtv, &ts);
} else {
struct user32_timespec ts;
error = copyin(utimeout, &ts, sizeof(ts));
TIMESPEC_TO_TIMEVAL(&rtv, &ts);
}
if (error)
return (error);
if (itimerfix(&rtv))
return (EINVAL);
getmicrouptime(&atv);
timevaladd(&atv, &rtv);
} else {
atv.tv_sec = 0;
atv.tv_usec = 0;
}
*atvp = atv;
return 0;
}
static int
kevent_set_kq_mode(struct kqueue *kq, unsigned int flags)
{
kqlock(kq);
if (kq->kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) {
if (flags & KEVENT_FLAG_LEGACY32) {
if ((kq->kq_state & KQ_KEV32) == 0) {
kqunlock(kq);
return EINVAL;
}
} else if (kq->kq_state & KQ_KEV32) {
kqunlock(kq);
return EINVAL;
}
} else if (flags & KEVENT_FLAG_LEGACY32) {
kq->kq_state |= KQ_KEV32;
} else {
kq->kq_state |= KQ_KEV64;
}
kqunlock(kq);
return 0;
}
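/*
 * kevent_get_kq - find the kqueue to operate on.
 *
 * For KEVENT_FLAG_WORKQ, returns the process-wide workq kqueue,
 * allocating it on first use (racing allocators are resolved under the
 * fd lock and the loser is deallocated). Otherwise looks the kqueue up
 * by file descriptor, returning a held fileproc in *fpp.
 */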
static int
kevent_get_kq(struct proc *p, int fd, unsigned int flags, struct fileproc **fpp, struct kqueue **kqp)
{
struct fileproc *fp = NULL;
struct kqueue *kq;
int error;
if (flags & KEVENT_FLAG_WORKQ) {
kq = p->p_wqkqueue;
if (kq == NULL) {
struct kqueue *alloc_kq = kqueue_alloc(p, KEVENT_FLAG_WORKQ);
if (alloc_kq == NULL)
return ENOMEM;
proc_fdlock(p);
if (p->p_wqkqueue == NULL) {
kq = p->p_wqkqueue = alloc_kq;
proc_fdunlock(p);
} else {
proc_fdunlock(p);
kq = p->p_wqkqueue;
kqueue_dealloc(alloc_kq);
}
}
} else {
if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
return (error);
}
if ((error = kevent_set_kq_mode(kq, flags)) != 0) {
if (fp != NULL)
fp_drop(p, fd, fp, 0);
return error;
}
*fpp = fp;
*kqp = kq;
return 0;
}
static int
kevent_internal(struct proc *p,
int fd,
user_addr_t changelist, int nchanges,
user_addr_t ueventlist, int nevents,
user_addr_t data_out, uint64_t data_available,
unsigned int flags,
user_addr_t utimeout,
kqueue_continue_t continuation,
int32_t *retval)
{
struct _kevent *cont_args;
uthread_t ut;
struct kqueue *kq;
struct fileproc *fp = NULL;
struct kevent_internal_s kev;
int error, noutputs;
struct timeval atv;
user_size_t data_size;
user_size_t data_resid;
if ((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ &&
!(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0)
return EINVAL;
if (flags & KEVENT_FLAG_STACK_EVENTS) {
int scale = ((flags & KEVENT_FLAG_LEGACY32) ?
(IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) :
sizeof(struct user32_kevent)) :
((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) :
sizeof(struct kevent_qos_s)));
ueventlist += nevents * scale;
}
error = kevent_get_timeout(p, utimeout, flags, &atv);
if (error)
return error;
error = kevent_get_data_size(p, data_available, flags, &data_size);
if (error)
return error;
error = kevent_get_kq(p, fd, flags, &fp, &kq);
if (error)
return error;
noutputs = 0;
while (nchanges > 0 && error == 0) {
error = kevent_copyin(&changelist, &kev, p, flags);
if (error)
break;
kev.flags &= ~EV_SYSFLAGS;
kevent_register(kq, &kev, p);
if (nevents > 0 &&
((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) {
if (kev.flags & EV_RECEIPT) {
kev.flags |= EV_ERROR;
kev.data = 0;
}
error = kevent_copyout(&kev, &ueventlist, p, flags);
if (error == 0) {
nevents--;
noutputs++;
}
} else if (kev.flags & EV_ERROR) {
error = kev.data;
}
nchanges--;
}
if (flags & KEVENT_FLAG_ERROR_EVENTS)
nevents = 0;
if (nevents > 0 && noutputs == 0 && error == 0) {
ut = (uthread_t)get_bsdthread_info(current_thread());
cont_args = &ut->uu_kevent.ss_kevent;
cont_args->fp = fp;
cont_args->fd = fd;
cont_args->retval = retval;
cont_args->eventlist = ueventlist;
cont_args->eventcount = nevents;
cont_args->eventout = noutputs;
cont_args->data_available = data_available;
cont_args->process_data.fp_fd = fd;
cont_args->process_data.fp_flags = flags;
cont_args->process_data.fp_data_out = data_out;
cont_args->process_data.fp_data_size = data_size;
cont_args->process_data.fp_data_resid = data_size;
error = kqueue_scan(kq, kevent_callback,
continuation, cont_args,
&cont_args->process_data,
&atv, p);
noutputs = cont_args->eventout;
data_resid = cont_args->process_data.fp_data_resid;
if (error == 0 && data_available && data_resid != data_size) {
(void)kevent_put_data_size(p, data_available, flags, data_resid);
}
}
if (error == ERESTART)
error = EINTR;
else if (error == EWOULDBLOCK)
error = 0;
if (error == 0)
*retval = noutputs;
if (fp != NULL)
fp_drop(p, fd, fp, 0);
return (error);
}
static int
kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp,
void *data)
{
struct _kevent *cont_args;
int error;
cont_args = (struct _kevent *)data;
assert(cont_args->eventout < cont_args->eventcount);
error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(),
cont_args->process_data.fp_flags);
if (error == 0 && ++cont_args->eventout == cont_args->eventcount)
error = EWOULDBLOCK;
return (error);
}
char *
kevent_description(struct kevent_internal_s *kevp, char *s, size_t n)
{
snprintf(s, n,
"kevent="
"{.ident=%#llx, .filter=%d, .flags=%#x, .udata=%#llx, .fflags=%#x, .data=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}",
kevp->ident,
kevp->filter,
kevp->flags,
kevp->udata,
kevp->fflags,
kevp->data,
kevp->ext[0],
kevp->ext[1] );
return (s);
}
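/*
 * kevent_register - add, modify, or delete a knote per the given kevent.
 *
 * Finds an existing knote matching the (ident, filter) pair or, for
 * EV_ADD, allocates and attaches a new one. Errors are reported by
 * setting EV_ERROR and the error code in kev itself, so callers can
 * copy the result back out to the user's event list.
 */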
void
kevent_register(struct kqueue *kq, struct kevent_internal_s *kev,
__unused struct proc *ctxp)
{
struct proc *p = kq->kq_p;
struct filterops *fops;
struct knote *kn = NULL;
int result = 0;
int error = 0;
if (kev->filter < 0) {
if (kev->filter + EVFILT_SYSCOUNT < 0) {
error = EINVAL;
goto out;
}
fops = sysfilt_ops[~kev->filter]; /* map negative filter ID to 0-based index */
} else {
error = EINVAL;
goto out;
}
if ((kev->flags & EV_VANISHED) &&
(kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) {
error = EINVAL;
goto out;
}
if (kev->flags & EV_DELETE)
kev->flags &= ~EV_ADD;
if (kev->flags & EV_DISABLE)
kev->flags &= ~EV_ENABLE;
restart:
proc_fdlock(p);
kn = knote_fdfind(kq, kev, p);
if (kn == NULL) {
if (kev->flags & EV_ADD) {
struct fileproc *fp = NULL;
if (fops->f_isfd) {
if ((error = fp_lookup(p, kev->ident, &fp, 1)) != 0) {
proc_fdunlock(p);
goto out;
}
}
kn = knote_alloc();
if (kn == NULL) {
proc_fdunlock(p);
error = ENOMEM;
if (fp != NULL)
fp_drop(p, kev->ident, fp, 0);
goto out;
}
kn->kn_fp = fp;
knote_set_kq(kn, kq);
kn->kn_filtid = ~kev->filter;
kn->kn_inuse = 1;
kn->kn_status = KN_ATTACHING | KN_ATTACHED;
if (kev->flags & EV_VANISHED) {
kev->flags &= ~EV_VANISHED;
kn->kn_status |= KN_REQVANISH;
}
if (kev->flags & EV_DISPATCH)
kn->kn_status |= KN_DISPATCH;
if (kev->flags & EV_UDATA_SPECIFIC)
kn->kn_status |= KN_UDATA_SPECIFIC;
kn->kn_kevent = *kev;
kn->kn_sfflags = kev->fflags;
kn->kn_sdata = kev->data;
kn->kn_fflags = 0;
kn->kn_data = 0;
if (kq->kq_state & KQ_WORKQ) {
kn->kn_qos = canonicalize_kevent_qos(kn->kn_qos);
knote_set_qos_index(kn, qos_index_from_qos(kn->kn_qos, FALSE));
knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
assert(knote_get_qos_index(kn) < KQWQ_NQOS);
} else {
knote_set_qos_index(kn, QOS_INDEX_KQFILE);
knote_set_qos_override_index(kn, QOS_INDEX_KQFILE);
}
if (kev->flags & EV_DISABLE)
knote_disable(kn);
error = knote_fdadd(kn, p);
proc_fdunlock(p);
if (error) {
knote_free(kn);
if (fp != NULL)
fp_drop(p, kev->ident, fp, 0);
goto out;
}
result = fops->f_attach(kn);
knoteuse2kqlock(kq, kn, 1);
if (kn->kn_flags & EV_ERROR) {
kn->kn_status &= ~KN_ATTACHED;
kn->kn_status |= KN_DROPPING;
error = kn->kn_data;
kqunlock(kq);
knote_drop(kn, p);
goto out;
}
kn->kn_status &= ~KN_ATTACHING;
if (kn->kn_status & KN_DROPPING) {
kqunlock(kq);
knote_drop(kn, p);
goto out;
}
if (result)
knote_activate(kn);
} else {
proc_fdunlock(p);
error = ENOENT;
goto out;
}
} else {
kqlock(kq);
proc_fdunlock(p);
if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) {
kn->kn_status |= KN_USEWAIT;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
goto restart;
}
if (kev->flags & EV_DELETE) {
if ((kev->flags & EV_ENABLE) == 0 &&
(kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) ==
(KN_DISPATCH2 | KN_DISABLED)) {
kn->kn_status |= KN_DEFERDELETE;
kqunlock(kq);
error = EINPROGRESS;
} else if (kqlock2knotedrop(kq, kn)) {
knote_drop(kn, p);
} else {
error = EINPROGRESS;
}
goto out;
}
if ((kev->flags & EV_ENABLE) &&
(kn->kn_status & KN_DEFERDELETE)) {
assert(kn->kn_status & KN_DISABLED);
knote_activate(kn);
knote_enable(kn);
kqunlock(kq);
goto out;
}
if (kev->flags & EV_DISABLE)
knote_disable(kn);
if (kqlock2knoteuse(kq, kn)) {
result = knote_fops(kn)->f_touch(kn, kev);
if (!knoteuse2kqlock(kq, kn, 0)) {
kqunlock(kq);
goto out;
}
if (result)
knote_activate(kn);
}
if (kev->flags & EV_ENABLE)
knote_enable(kn);
}
kqunlock(kq);
out:
if (error) {
kev->flags |= EV_ERROR;
kev->data = error;
}
}
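/*
 * knote_process - process a triggered (queued, active) knote.
 *
 * Called with the kqueue locked; temporarily converts the lock to a use
 * reference around the filter's f_process routine, then applies the
 * post-delivery disposition (oneshot drop, dispatch disable, deferred
 * delete, or re-activation for level-triggered knotes) before invoking
 * the output callback. Returns EJUSTRETURN if nothing was delivered.
 */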
static int
knote_process(struct knote *kn,
kevent_callback_t callback,
void *callback_data,
struct filt_process_s *process_data,
struct proc *p)
{
struct kevent_internal_s kev;
struct kqueue *kq = knote_get_kq(kn);
int result = 0;
int error = 0;
bzero(&kev, sizeof(kev));
assert(kn->kn_status & KN_QUEUED);
assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE));
assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING)));
if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
kev.filter = kn->kn_filter;
kev.ident = kn->kn_id;
kev.qos = kn->kn_qos;
kev.flags = (kn->kn_status & KN_DEFERDELETE) ?
EV_DELETE : EV_VANISHED;
kev.flags |= (EV_DISPATCH2 | EV_ONESHOT);
kev.udata = kn->kn_udata;
result = 1;
knote_suppress(kn);
} else {
knote_deactivate(kn);
knote_suppress(kn);
if (!kqlock2knoteuse(kq, kn))
panic("dropping knote found on queue\n");
result = knote_fops(kn)->f_process(kn, process_data, &kev);
if (!knoteuse2kqlock(kq, kn, result)) {
kn = NULL;
}
}
if (kn != NULL) {
if (result == 0)
return (EJUSTRETURN);
if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) {
if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) {
kn->kn_status |= KN_DEFERDELETE;
knote_disable(kn);
if (kn->kn_status & KN_STOLENDROP) {
assert(kn->kn_status & KN_DROPPING);
kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP);
}
} else if (kn->kn_status & KN_STOLENDROP) {
assert(kn->kn_status & KN_DROPPING);
knote_unsuppress(kn);
kqunlock(kq);
knote_drop(kn, p);
kqlock(kq);
} else if (kqlock2knotedrop(kq, kn)) {
knote_drop(kn, p);
kqlock(kq);
}
} else if (kn->kn_status & KN_DISPATCH) {
knote_disable(kn);
} else if ((kev.flags & EV_CLEAR) == 0) {
knote_activate(kn);
}
}
if (result) {
kqunlock(kq);
error = (callback)(kq, &kev, callback_data);
kqlock(kq);
}
return (error);
}
static int
kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
{
struct kqrequest *kqr;
thread_t self = current_thread();
__assert_only struct uthread *ut = get_bsdthread_info(self);
thread_t thread;
assert(kqwq->kqwq_state & KQ_WORKQ);
assert(qos_index < KQWQ_NQOS);
kqwq_req_lock(kqwq);
kqr = kqworkq_get_request(kqwq, qos_index);
thread = kqr->kqr_thread;
if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
assert(kqr->kqr_thread != self);
kqwq_req_unlock(kqwq);
return -1;
}
kqworkq_bind_thread(kqwq, qos_index, self, flags);
} else {
assert(thread == self);
assert(ut->uu_kqueue_bound == qos_index);
assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags);
}
assert(kqr->kqr_state & KQWQ_THREQUESTED);
assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
if (kqueue_queue_empty(&kqwq->kqwq_kqueue, qos_index)) {
kqwq_req_unlock(kqwq);
return -1;
}
kqr->kqr_state &= ~(KQWQ_HOOKCALLED | KQWQ_WAKEUP);
kqr->kqr_state |= KQWQ_PROCESSING;
kqwq_req_unlock(kqwq);
return 0;
}
static int
kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
{
struct kqtailq *suppressq;
if (kq->kq_state & KQ_WORKQ)
return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags);
assert(qos_index == QOS_INDEX_KQFILE);
for (;;) {
if (kq->kq_state & KQ_DRAIN)
return -1;
if ((kq->kq_state & KQ_PROCESSING) == 0)
break;
kq->kq_state |= KQ_PROCWAIT;
suppressq = kqueue_get_suppressed_queue(kq, qos_index);
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq),
THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
kqunlock(kq);
thread_block(THREAD_CONTINUE_NULL);
kqlock(kq);
}
waitq_set_clear_preposts(&kq->kq_wqs);
kq->kq_state &= ~KQ_WAKEUP;
if (kqueue_queue_empty(kq, qos_index))
return -1;
kq->kq_state |= KQ_PROCESSING;
return 0;
}
static void
kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags)
{
#pragma unused(flags)
struct kqueue *kq = &kqwq->kqwq_kqueue;
struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index);
thread_t self = current_thread();
__assert_only struct uthread *ut = get_bsdthread_info(self);
struct knote *kn;
struct kqrequest *kqr;
int queued_events;
uint16_t pended;
thread_t thread;
assert(kqwq->kqwq_state & KQ_WORKQ);
assert(qos_index < KQWQ_NQOS);
kqwq_req_lock(kqwq);
kqr = kqworkq_get_request(kqwq, qos_index);
thread = kqr->kqr_thread;
if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
assert(thread != self);
kqwq_req_unlock(kqwq);
return;
}
assert(kqr->kqr_state & KQWQ_THREQUESTED);
if (thread == THREAD_NULL) {
assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
assert(TAILQ_EMPTY(suppressq));
} else {
assert(thread == self);
}
} else {
assert(thread == self);
assert(ut->uu_kqueue_bound == qos_index);
assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0);
}
kqwq_req_unlock(kqwq);
queued_events = !kqueue_queue_empty(kq, qos_index);
while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
assert(kn->kn_status & KN_SUPPRESSED);
knote_unsuppress(kn);
}
kqwq_req_lock(kqwq);
pended = (kqr->kqr_state & (KQWQ_HOOKCALLED | KQWQ_WAKEUP));
kqworkq_unbind_thread(kqwq, qos_index, self, flags);
kqr->kqr_state &= ~(KQWQ_PROCESSING |
KQWQ_THREQUESTED | KQWQ_THMANAGER);
if ((queued_events || pended) &&
!kqueue_queue_empty(kq, qos_index)) {
kqworkq_request_thread(kqwq, qos_index);
}
kqwq_req_unlock(kqwq);
}
static void
kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags)
{
struct knote *kn;
struct kqtailq *suppressq;
int procwait;
if (kq->kq_state & KQ_WORKQ) {
kqworkq_end_processing((struct kqworkq *)kq, qos_index, flags);
return;
}
assert(qos_index == QOS_INDEX_KQFILE);
suppressq = kqueue_get_suppressed_queue(kq, qos_index);
while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
assert(kn->kn_status & KN_SUPPRESSED);
knote_unsuppress(kn);
}
procwait = (kq->kq_state & KQ_PROCWAIT);
kq->kq_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
if (procwait) {
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq),
THREAD_AWAKENED,
WAITQ_ALL_PRIORITIES);
}
}
int
kevent_qos_internal_bind(
struct proc *p,
int qos_class,
thread_t thread,
unsigned int flags)
{
struct fileproc *fp = NULL;
struct kqueue *kq = NULL;
struct kqworkq *kqwq;
struct kqrequest *kqr;
struct uthread *ut;
kq_index_t qos_index;
int res = 0;
assert(thread != THREAD_NULL);
assert(flags & KEVENT_FLAG_WORKQ);
if (thread == THREAD_NULL ||
(flags & KEVENT_FLAG_WORKQ) == 0) {
return EINVAL;
}
ut = get_bsdthread_info(thread);
res = kevent_get_kq(p, -1, flags, &fp, &kq);
assert(fp == NULL);
if (res)
return res;
qos_index = qos_index_for_servicer(qos_class, thread, flags);
if (qos_index == KQWQ_QOS_MANAGER) {
assert(ut->uu_kqueue_bound == 0);
ut->uu_kqueue_bound = qos_index;
ut->uu_kqueue_flags = flags;
return 0;
}
kqlock(kq);
assert(kq->kq_state & KQ_WORKQ);
kqwq = (struct kqworkq *)kq;
kqr = kqworkq_get_request(kqwq, qos_index);
kqwq_req_lock(kqwq);
assert(kqr->kqr_state & KQWQ_THREQUESTED);
assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
assert((kqr->kqr_state & KQWQ_PROCESSING) == 0);
if (thread == kqr->kqr_thread) {
assert(ut->uu_kqueue_bound == qos_index);
goto out;
}
assert(ut->uu_kqueue_bound == 0);
assert(ut->uu_kqueue_flags == 0);
assert(kqr->kqr_thread == THREAD_NULL);
if (kqr->kqr_thread != THREAD_NULL ||
(kqr->kqr_state & KQWQ_THMANAGER)) {
res = EINPROGRESS;
goto out;
}
kqr->kqr_thread = thread;
ut->uu_kqueue_bound = qos_index;
ut->uu_kqueue_flags = flags;
if (kqr->kqr_override_delta) {
thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
}
out:
kqwq_req_unlock(kqwq);
kqunlock(kq);
return res;
}
int
kevent_qos_internal_unbind(
struct proc *p,
int qos_class,
thread_t thread,
unsigned int flags)
{
struct kqueue *kq;
struct uthread *ut;
struct fileproc *fp = NULL;
kq_index_t qos_index;
kq_index_t end_index;
int res;
assert(flags & KEVENT_FLAG_WORKQ);
assert(thread == current_thread());
if (thread == THREAD_NULL ||
(flags & KEVENT_FLAG_WORKQ) == 0)
return EINVAL;
res = kevent_get_kq(p, -1, flags, &fp, &kq);
assert(fp == NULL);
if (res)
return res;
assert(kq->kq_state & KQ_WORKQ);
qos_index = qos_index_for_servicer(qos_class, thread, flags);
ut = get_bsdthread_info(thread);
if (ut->uu_kqueue_bound != qos_index) {
__assert_only struct kqworkq *kqwq = (struct kqworkq *)kq;
__assert_only struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
assert(ut->uu_kqueue_bound == 0);
assert(ut->uu_kqueue_flags == 0);
assert(kqr->kqr_thread != thread);
return EALREADY;
}
end_index = (qos_index == KQWQ_QOS_MANAGER) ?
0 : qos_index;
kqlock(kq);
do {
kqueue_end_processing(kq, qos_index, flags);
} while (qos_index-- > end_index);
kqunlock(kq);
ut->uu_kqueue_bound = 0;
ut->uu_kqueue_flags = 0;
return 0;
}
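/*
 * kqueue_process - process the triggered events in a kqueue
 *
 * Walks the queued knotes from the servicer's QoS class downward (a
 * manager services every class), delivering each event through the
 * callback via knote_process. EJUSTRETURN from the callback means
 * "processed, but nothing to deliver"; EWOULDBLOCK means the output
 * buffer is full and the scan stops early. The number of delivered
 * events is returned in *countp.
 *
 * Called with the kqueue locked; knote_process may drop and retake
 * the lock while calling out to filters.
 */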
static int
kqueue_process(struct kqueue *kq,
kevent_callback_t callback,
void *callback_data,
struct filt_process_s *process_data,
kq_index_t servicer_qos_index,
int *countp,
struct proc *p)
{
unsigned int flags = process_data ? process_data->fp_flags : 0;
kq_index_t start_index, end_index, i;
struct knote *kn;
int nevents = 0;
int error = 0;
start_index = servicer_qos_index;
end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index;
i = start_index;
do {
if (kqueue_begin_processing(kq, i, flags) == -1) {
*countp = 0;
continue;
}
error = 0;
struct kqtailq *base_queue = kqueue_get_base_queue(kq, i);
struct kqtailq *queue = kqueue_get_high_queue(kq, i);
do {
while (error == 0 &&
(kn = TAILQ_FIRST(queue)) != NULL) {
error = knote_process(kn, callback, callback_data, process_data, p);
if (error == EJUSTRETURN)
error = 0;
else
nevents++;
if (error == EWOULDBLOCK) {
if ((kq->kq_state & KQ_WORKQ) == 0)
kqueue_end_processing(kq, i, flags);
error = 0;
goto out;
}
}
} while (error == 0 && queue-- > base_queue);
if ((kq->kq_state & KQ_WORKQ) == 0)
kqueue_end_processing(kq, i, flags);
} while (i-- > end_index);
out:
*countp = nevents;
return (error);
}
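/*
 * kqueue_scan_continue - continuation for a thread blocked in kqueue_scan
 *
 * Runs when the blocked thread is woken. Resumes processing from the
 * scan arguments saved in the uthread, re-blocking with this routine as
 * the continuation if there is still nothing to deliver, and otherwise
 * maps the wait result to an error and invokes the saved completion
 * continuation. Does not return to its caller.
 */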
static void
kqueue_scan_continue(void *data, wait_result_t wait_result)
{
thread_t self = current_thread();
uthread_t ut = (uthread_t)get_bsdthread_info(self);
struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan;
struct kqueue *kq = (struct kqueue *)data;
struct filt_process_s *process_data = cont_args->process_data;
int error;
int count;
switch (wait_result) {
case THREAD_AWAKENED: {
kqlock(kq);
retry:
error = kqueue_process(kq, cont_args->call, cont_args->data,
process_data, cont_args->servicer_qos_index,
&count, current_proc());
if (error == 0 && count == 0) {
if (kq->kq_state & KQ_WAKEUP)
goto retry;
waitq_assert_wait64((struct waitq *)&kq->kq_wqs,
KQ_EVENT, THREAD_ABORTSAFE,
cont_args->deadline);
kq->kq_state |= KQ_SLEEP;
kqunlock(kq);
thread_block_parameter(kqueue_scan_continue, kq);
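/* NOTREACHED */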
}
kqunlock(kq);
} break;
case THREAD_TIMED_OUT:
error = EWOULDBLOCK;
break;
case THREAD_INTERRUPTED:
error = EINTR;
break;
case THREAD_RESTART:
error = EBADF;
break;
default:
panic("%s: - invalid wait_result (%d)", __func__,
wait_result);
error = 0;
}
assert(cont_args->cont != NULL);
(cont_args->cont)(kq, cont_args->data, error);
}
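/*
 * kqueue_scan - scan and wait for events in a kqueue
 *
 * Delivers already-triggered events through the callback. If none are
 * ready, the timeout (converted to a deadline once, on the first pass)
 * governs how long to block: a wakeup rescans, a timeout returns
 * EWOULDBLOCK, an interrupt returns EINTR, and a drained kqueue returns
 * EBADF. When a continuation is supplied, the thread blocks through
 * kqueue_scan_continue so its kernel stack can be reclaimed.
 */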
int
kqueue_scan(struct kqueue *kq,
kevent_callback_t callback,
kqueue_continue_t continuation,
void *callback_data,
struct filt_process_s *process_data,
struct timeval *atvp,
struct proc *p)
{
thread_continue_t cont = THREAD_CONTINUE_NULL;
kq_index_t servicer_qos_index;
unsigned int flags;
uint64_t deadline;
int error;
int first;
int fd;
assert(callback != NULL);
flags = (process_data) ? process_data->fp_flags : 0;
fd = (process_data) ? process_data->fp_fd : -1;
servicer_qos_index = (kq->kq_state & KQ_WORKQ) ?
qos_index_for_servicer(fd, current_thread(), flags) :
QOS_INDEX_KQFILE;
first = 1;
for (;;) {
wait_result_t wait_result;
int count;
kqlock(kq);
error = kqueue_process(kq, callback, callback_data,
process_data, servicer_qos_index,
&count, p);
if (error || count)
break;
if (first) {
first = 0;
if (atvp->tv_sec || atvp->tv_usec) {
uint64_t now;
clock_get_uptime(&now);
nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC +
atvp->tv_usec * (long)NSEC_PER_USEC,
&deadline);
if (now >= deadline) {
error = EWOULDBLOCK;
break;
}
deadline -= now;
clock_absolutetime_interval_to_deadline(deadline, &deadline);
} else {
deadline = 0;
}
if (continuation) {
uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());
struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan;
cont_args->call = callback;
cont_args->cont = continuation;
cont_args->deadline = deadline;
cont_args->data = callback_data;
cont_args->process_data = process_data;
cont_args->servicer_qos_index = servicer_qos_index;
cont = kqueue_scan_continue;
}
}
if (kq->kq_state & KQ_WAKEUP) {
kqunlock(kq);
continue;
}
waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs,
KQ_EVENT, THREAD_ABORTSAFE,
TIMEOUT_URGENCY_USER_NORMAL,
deadline, TIMEOUT_NO_LEEWAY);
kq->kq_state |= KQ_SLEEP;
kqunlock(kq);
wait_result = thread_block_parameter(cont, kq);
switch (wait_result) {
case THREAD_AWAKENED:
continue;
case THREAD_TIMED_OUT:
return EWOULDBLOCK;
case THREAD_INTERRUPTED:
return EINTR;
case THREAD_RESTART:
return EBADF;
default:
panic("%s: - bad wait_result (%d)", __func__,
wait_result);
error = 0;
}
}
kqunlock(kq);
return (error);
}
static int
kqueue_read(__unused struct fileproc *fp,
__unused struct uio *uio,
__unused int flags,
__unused vfs_context_t ctx)
{
return (ENXIO);
}
static int
kqueue_write(__unused struct fileproc *fp,
__unused struct uio *uio,
__unused int flags,
__unused vfs_context_t ctx)
{
return (ENXIO);
}
static int
kqueue_ioctl(__unused struct fileproc *fp,
__unused u_long com,
__unused caddr_t data,
__unused vfs_context_t ctx)
{
return (ENOTTY);
}
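/*
 * kqueue_select - select/poll support for kqueue descriptors
 *
 * A kqueue selects readable when a queued knote is active or when a
 * stay-active knote's filter reports pending data through f_peek. The
 * waitq linkage below joins kq_wqs to the caller's select set and then
 * passes the kq_wqs pointer back out through the wq_link_id buffer for
 * the select machinery to record.
 */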
static int
kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
__unused vfs_context_t ctx)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
struct kqtailq *queue;
struct kqtailq *suppressq;
struct knote *kn;
int retnum = 0;
if (which != FREAD)
return (0);
kqlock(kq);
assert((kq->kq_state & KQ_WORKQ) == 0);
if (wq_link_id != NULL) {
thread_t cur_act = current_thread();
struct uthread * ut = get_bsdthread_info(cur_act);
kq->kq_state |= KQ_SEL;
waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset,
WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id);
waitq_link_release(*(uint64_t *)wq_link_id);
*(uint64_t *)wq_link_id = 0;
void *wqptr = &kq->kq_wqs;
memcpy(wq_link_id, (void *)&wqptr, sizeof(void *));
}
if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) {
kqunlock(kq);
return (0);
}
queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE);
if (!TAILQ_EMPTY(queue)) {
while ((kn = (struct knote *)TAILQ_FIRST(queue)) != NULL) {
if (kn->kn_status & KN_ACTIVE) {
retnum = 1;
goto out;
}
assert(kn->kn_status & KN_STAYACTIVE);
knote_suppress(kn);
}
suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
while ((kn = (struct knote *)TAILQ_FIRST(suppressq)) != NULL) {
unsigned peek = 1;
if (kqlock2knoteuse(kq, kn)) {
peek = knote_fops(kn)->f_peek(kn);
if (!knoteuse2kqlock(kq, kn, 0))
continue;
}
knote_unsuppress(kn);
if (peek > 0) {
retnum = 1;
goto out;
}
}
}
out:
kqueue_end_processing(kq, QOS_INDEX_KQFILE, 0);
kqunlock(kq);
return (retnum);
}
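/*
 * kqueue_close - fileops close handler; deallocate the backing kqueue.
 */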
static int
kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
struct kqfile *kqf = (struct kqfile *)fg->fg_data;
assert((kqf->kqf_state & KQ_WORKQ) == 0);
kqueue_dealloc(&kqf->kqf_kqueue);
fg->fg_data = NULL;
return (0);
}
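/*
 * kqueue_kqfilter - support EVFILT_READ on a kqueue (kqueues in kqueues)
 *
 * Only EVFILT_READ is allowed, and a kqueue may not watch itself. The
 * kq_level bookkeeping keeps parents at a strictly higher level than
 * their children, bounding the nesting depth and preventing cycles.
 * Returns nonzero if the watched kqueue already has queued events.
 */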
static int
kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
{
struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data;
struct kqueue *kq = &kqf->kqf_kqueue;
struct kqueue *parentkq = knote_get_kq(kn);
assert((kqf->kqf_state & KQ_WORKQ) == 0);
if (parentkq == kq ||
kn->kn_filter != EVFILT_READ) {
kn->kn_flags = EV_ERROR;
kn->kn_data = EINVAL;
return 0;
}
kqlock(parentkq);
if (parentkq->kq_level > 0 &&
parentkq->kq_level < kq->kq_level)
{
kqunlock(parentkq);
kn->kn_flags = EV_ERROR;
kn->kn_data = EINVAL;
return 0;
} else {
if (parentkq->kq_level == 0)
parentkq->kq_level = 2;
if (parentkq->kq_level < kq->kq_level + 1)
parentkq->kq_level = kq->kq_level + 1;
kqunlock(parentkq);
kn->kn_filtid = EVFILTID_KQREAD;
kqlock(kq);
KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
if (kq->kq_level == 0)
kq->kq_level = 1;
int count = kq->kq_count;
kqunlock(kq);
return (count > 0);
}
}
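/*
 * kqueue_drain - mark the kqueue draining and knock out any waiters.
 */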
static int
kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
assert((kq->kq_state & KQ_WORKQ) == 0);
kqlock(kq);
kq->kq_state |= KQ_DRAIN;
kqueue_interrupt(kq);
kqunlock(kq);
return (0);
}
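/*
 * kqueue_stat - fstat support for kqueue descriptors
 *
 * Reports the queued event count as st_size, the size of the kevent
 * structure variant the queue operates in (QoS, 64-bit, or the
 * process-native layout) as st_blksize, and S_IFIFO as the mode.
 */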
int
kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
{
assert((kq->kq_state & KQ_WORKQ) == 0);
kqlock(kq);
if (isstat64 != 0) {
struct stat64 *sb64 = (struct stat64 *)ub;
bzero((void *)sb64, sizeof(*sb64));
sb64->st_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
sb64->st_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
sb64->st_blksize = sizeof(struct kevent64_s);
else if (IS_64BIT_PROCESS(p))
sb64->st_blksize = sizeof(struct user64_kevent);
else
sb64->st_blksize = sizeof(struct user32_kevent);
sb64->st_mode = S_IFIFO;
} else {
struct stat *sb = (struct stat *)ub;
bzero((void *)sb, sizeof(*sb));
sb->st_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
sb->st_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
sb->st_blksize = sizeof(struct kevent64_s);
else if (IS_64BIT_PROCESS(p))
sb->st_blksize = sizeof(struct user64_kevent);
else
sb->st_blksize = sizeof(struct user32_kevent);
sb->st_mode = S_IFIFO;
}
kqunlock(kq);
return (0);
}
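/*
 * kqworkq_request_thread - request a servicer thread for a QoS class
 *
 * Asks the pthread workqueue shim for one thread at the priority mapped
 * from qos_index, marking the request KQWQ_THREQUESTED. A (thread_t)-1
 * reply means the request was redirected to the manager thread; that
 * case, and explicit manager-class requests, bind with
 * KEVENT_FLAG_WORKQ_MANAGER set.
 *
 * Called with the kqwq request lock held.
 */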
static void
kqworkq_request_thread(
struct kqworkq *kqwq,
kq_index_t qos_index)
{
struct kqrequest *kqr;
assert(kqwq->kqwq_state & KQ_WORKQ);
assert(qos_index < KQWQ_NQOS);
kqr = kqworkq_get_request(kqwq, qos_index);
if (kqr->kqr_state & KQWQ_THREQUESTED)
return;
assert(kqr->kqr_thread == THREAD_NULL);
if (pthread_functions != NULL &&
pthread_functions->workq_reqthreads != NULL) {
unsigned int flags = KEVENT_FLAG_WORKQ;
struct workq_reqthreads_req_s request = {
.priority = qos_from_qos_index(qos_index),
.count = 1
};
thread_t wqthread;
wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request);
kqr->kqr_state |= KQWQ_THREQUESTED;
if (wqthread == (thread_t)-1) {
flags |= KEVENT_FLAG_WORKQ_MANAGER;
wqthread = THREAD_NULL;
} else if (qos_index == KQWQ_QOS_MANAGER)
flags |= KEVENT_FLAG_WORKQ_MANAGER;
kqworkq_bind_thread(kqwq, qos_index, wqthread, flags);
}
}
static void
kqworkq_request_help(
struct kqworkq *kqwq,
kq_index_t qos_index,
uint32_t type)
{
struct kqrequest *kqr;
assert(qos_index < KQWQ_NQOS);
kqwq_req_lock(kqwq);
kqr = kqworkq_get_request(kqwq, qos_index);
if (kqr->kqr_state & KQWQ_PROCESSING) {
kqr->kqr_state |= type;
kqwq_req_unlock(kqwq);
return;
}
kqworkq_request_thread(kqwq, qos_index);
kqwq_req_unlock(kqwq);
}
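/*
 * Map each QoS class to the contiguous run of kqueue queues it owns:
 * _kq_base_index is the queue used at the class's own QoS and
 * _kq_high_index bounds how far an override may promote a knote
 * within the class's range.
 */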
static kq_index_t _kq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
static kq_index_t _kq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};
static struct kqtailq *
kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index)
{
assert(qos_index < KQWQ_NQOS);
return &kq->kq_queue[_kq_base_index[qos_index]];
}
static struct kqtailq *
kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index)
{
assert(qos_index < KQWQ_NQOS);
return &kq->kq_queue[_kq_high_index[qos_index]];
}
static int
kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index)
{
struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index);
struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index);
do {
if (!TAILQ_EMPTY(queue))
return 0;
} while (queue-- > base_queue);
return 1;
}
static struct kqtailq *
kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index)
{
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
struct kqrequest *kqr;
kqr = kqworkq_get_request(kqwq, qos_index);
return &kqr->kqr_suppressed;
} else {
struct kqfile *kqf = (struct kqfile *)kq;
return &kqf->kqf_suppressed;
}
}
static kq_index_t
knote_get_queue_index(struct knote *kn)
{
kq_index_t override_index = knote_get_qos_override_index(kn);
kq_index_t qos_index = knote_get_qos_index(kn);
struct kqueue *kq = knote_get_kq(kn);
kq_index_t res;
if ((kq->kq_state & KQ_WORKQ) == 0) {
assert(qos_index == 0);
assert(override_index == 0);
}
res = _kq_base_index[qos_index];
if (override_index > qos_index)
res += override_index - qos_index;
assert(res <= _kq_high_index[qos_index]);
return res;
}
static struct kqtailq *
knote_get_queue(struct knote *kn)
{
kq_index_t qindex = knote_get_queue_index(kn);
return &(knote_get_kq(kn))->kq_queue[qindex];
}
static struct kqtailq *
knote_get_suppressed_queue(struct knote *kn)
{
kq_index_t qos_index = knote_get_qos_index(kn);
struct kqueue *kq = knote_get_kq(kn);
return kqueue_get_suppressed_queue(kq, qos_index);
}
static kq_index_t
knote_get_req_index(struct knote *kn)
{
return kn->kn_req_index;
}
static kq_index_t
knote_get_qos_index(struct knote *kn)
{
return kn->kn_qos_index;
}
static void
knote_set_qos_index(struct knote *kn, kq_index_t qos_index)
{
struct kqueue *kq = knote_get_kq(kn);
assert(qos_index < KQWQ_NQOS);
assert((kn->kn_status & KN_QUEUED) == 0);
if (kq->kq_state & KQ_WORKQ)
assert(qos_index > QOS_INDEX_KQFILE);
else
assert(qos_index == QOS_INDEX_KQFILE);
kn->kn_req_index = qos_index;
if ((kn->kn_status & KN_SUPPRESSED) == 0)
kn->kn_qos_index = qos_index;
}
static kq_index_t
knote_get_qos_override_index(struct knote *kn)
{
return kn->kn_qos_override;
}
static void
knote_set_qos_override_index(struct knote *kn, kq_index_t override_index)
{
struct kqueue *kq = knote_get_kq(kn);
kq_index_t qos_index = knote_get_qos_index(kn);
assert((kn->kn_status & KN_QUEUED) == 0);
if (override_index == KQWQ_QOS_MANAGER)
assert(qos_index == KQWQ_QOS_MANAGER);
else
assert(override_index < KQWQ_QOS_MANAGER);
kn->kn_qos_override = override_index;
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
assert(qos_index > QOS_INDEX_KQFILE);
kqworkq_update_override(kqwq, qos_index, override_index);
}
}
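/*
 * kqworkq_update_override - propagate a QoS override to the servicer
 *
 * Records the largest override delta seen for the request and, when a
 * non-manager servicer is already bound, applies (or raises) the
 * matching IPC override on that thread.
 */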
static void
kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index)
{
struct kqrequest *kqr;
kq_index_t new_delta;
kq_index_t old_delta;
new_delta = (override_index > qos_index) ?
override_index - qos_index : 0;
kqr = kqworkq_get_request(kqwq, qos_index);
kqwq_req_lock(kqwq);
old_delta = kqr->kqr_override_delta;
if (new_delta > old_delta) {
thread_t wqthread = kqr->kqr_thread;
kqr->kqr_override_delta = new_delta;
if (wqthread) {
if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
if (old_delta)
thread_update_ipc_override(wqthread, override_index);
else
thread_add_ipc_override(wqthread, override_index);
}
}
}
kqwq_req_unlock(kqwq);
}
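/*
 * kqworkq_bind_thread - bind a newly arrived workq thread to a request
 *
 * A THREAD_NULL bind with the manager flag just records that the
 * manager will service the request later. Otherwise the thread is
 * recorded as the servicer, any accumulated override delta is applied
 * as an IPC override, and the binding is mirrored into the uthread.
 *
 * Called with the kqwq request lock held.
 */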
static void
kqworkq_bind_thread(
struct kqworkq *kqwq,
kq_index_t qos_index,
thread_t thread,
unsigned int flags)
{
struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
thread_t old_thread = kqr->kqr_thread;
struct uthread *ut;
assert(kqr->kqr_state & KQWQ_THREQUESTED);
if (thread == THREAD_NULL) {
assert(old_thread == THREAD_NULL);
if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
kqr->kqr_state |= KQWQ_THMANAGER;
}
return;
}
ut = get_bsdthread_info(thread);
if (flags & KEVENT_FLAG_WORKQ_MANAGER) {
if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
assert(old_thread == THREAD_NULL);
kqr->kqr_state |= KQWQ_THMANAGER;
} else if (old_thread == THREAD_NULL) {
kqr->kqr_thread = thread;
ut->uu_kqueue_bound = KQWQ_QOS_MANAGER;
ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ |
KEVENT_FLAG_WORKQ_MANAGER);
} else {
assert(thread == old_thread);
assert(ut->uu_kqueue_bound == KQWQ_QOS_MANAGER);
assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER);
}
return;
}
assert(old_thread == THREAD_NULL);
assert((kqr->kqr_state & KQWQ_THMANAGER) == 0);
kqr->kqr_thread = thread;
if (kqr->kqr_override_delta)
thread_add_ipc_override(thread, qos_index + kqr->kqr_override_delta);
ut->uu_kqueue_bound = qos_index;
ut->uu_kqueue_flags = flags;
}
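/*
 * kqworkq_unbind_thread - unbind the current servicer from a request
 *
 * Drops the IPC override applied to a non-manager servicer and
 * recomputes the override delta from the highest non-empty queue still
 * above the class's base queue, so a future servicer inherits it.
 */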
static void
kqworkq_unbind_thread(
struct kqworkq *kqwq,
kq_index_t qos_index,
thread_t thread,
__unused unsigned int flags)
{
struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index);
kq_index_t override = 0;
assert(thread == current_thread());
if (kqr->kqr_override_delta) {
struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index);
struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index);
if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) {
assert(thread == kqr->kqr_thread);
thread_drop_ipc_override(thread);
}
do {
if (!TAILQ_EMPTY(queue)) {
override = queue - base_queue;
break;
}
} while (queue-- > base_queue);
}
kqr->kqr_thread = THREAD_NULL;
kqr->kqr_override_delta = override;
}
struct kqrequest *
kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
{
assert(qos_index < KQWQ_NQOS);
return &kqwq->kqwq_request[qos_index];
}
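/*
 * knote_adjust_qos - apply updated QoS and override values to a knote
 *
 * Only meaningful on workq kqueues. The QoS requested at registration
 * acts as a floor for both values; if either index changes, a queued
 * knote is requeued (and a wakeup posted) so it lands on the right
 * queue.
 */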
void
knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override)
{
if (knote_get_kq(kn)->kq_state & KQ_WORKQ) {
kq_index_t new_qos_index;
kq_index_t new_override_index;
kq_index_t servicer_qos_index;
new_qos_index = qos_index_from_qos(new_qos, FALSE);
new_override_index = qos_index_from_qos(new_override, TRUE);
servicer_qos_index = qos_index_from_qos(kn->kn_qos, FALSE);
if (servicer_qos_index > new_qos_index)
new_qos_index = servicer_qos_index;
if (servicer_qos_index > new_override_index)
new_override_index = servicer_qos_index;
kqlock(knote_get_kq(kn));
if (new_qos_index != knote_get_req_index(kn) ||
new_override_index != knote_get_qos_override_index(kn)) {
if (kn->kn_status & KN_QUEUED) {
knote_dequeue(kn);
knote_set_qos_index(kn, new_qos_index);
knote_set_qos_override_index(kn, new_override_index);
knote_enqueue(kn);
knote_wakeup(kn);
} else {
knote_set_qos_index(kn, new_qos_index);
knote_set_qos_override_index(kn, new_override_index);
}
}
kqunlock(knote_get_kq(kn));
}
}
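/*
 * knote_wakeup - wake up threads waiting on a newly activated knote
 *
 * Workq kqueues request servicer help at the knote's QoS class. File
 * kqueues flag any in-progress processing pass for a rescan, wake
 * sleeping or selecting threads, and notify kqueues nested on top of
 * this one.
 */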
static void
knote_wakeup(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
if (kq->kq_state & KQ_WORKQ) {
struct kqworkq *kqwq = (struct kqworkq *)kq;
kq_index_t qos_index = knote_get_qos_index(kn);
kqworkq_request_help(kqwq, qos_index, KQWQ_WAKEUP);
} else {
struct kqfile *kqf = (struct kqfile *)kq;
if (kq->kq_state & KQ_PROCESSING)
kq->kq_state |= KQ_WAKEUP;
if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) {
kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
KQ_EVENT,
THREAD_AWAKENED,
WAITQ_ALL_PRIORITIES);
}
KNOTE(&kqf->kqf_sel.si_note, 0);
}
}
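/*
 * kqueue_interrupt - force waiters on a draining kqueue to return
 *
 * Wakes threads blocked in scan or select with THREAD_RESTART (which
 * the scan path maps to EBADF) and releases any thread parked waiting
 * for the processing phase to finish.
 */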
static void
kqueue_interrupt(struct kqueue *kq)
{
assert((kq->kq_state & KQ_WORKQ) == 0);
if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) {
kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
(void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
KQ_EVENT,
THREAD_RESTART,
WAITQ_ALL_PRIORITIES);
}
if (kq->kq_state & KQ_PROCWAIT) {
struct kqtailq *suppressq;
assert(kq->kq_state & KQ_PROCESSING);
kq->kq_state &= ~KQ_PROCWAIT;
suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE);
(void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(suppressq),
THREAD_RESTART,
WAITQ_ALL_PRIORITIES);
}
}
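/*
 * Prepost hook from the waitq subsystem: something was posted to the
 * kqueue's wait queue set while no thread was waiting, so ask the
 * manager to come scan for it.
 */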
void
waitq_set__CALLING_PREPOST_HOOK__(void *kq_hook, void *knote_hook, int qos)
{
#pragma unused(knote_hook, qos)
struct kqworkq *kqwq = (struct kqworkq *)kq_hook;
assert(kqwq->kqwq_state & KQ_WORKQ);
kqworkq_request_help(kqwq, KQWQ_QOS_MANAGER, KQWQ_HOOKCALLED);
}
void
klist_init(struct klist *list)
{
SLIST_INIT(list);
}
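/*
 * knote - post a hint to every knote attached to a klist
 *
 * Calls each knote's f_event filter routine with the hint and activates
 * the knote when the filter reports it triggered. Event sources usually
 * reach this through the KNOTE() macro, e.g. (sketch)
 * KNOTE(&sb->sb_sel.si_note, hint).
 */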
void
knote(struct klist *list, long hint)
{
struct knote *kn;
SLIST_FOREACH(kn, list, kn_selnext) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
if (kqlock2knoteuse(kq, kn)) {
int result;
result = knote_fops(kn)->f_event(kn, hint);
if (knoteuse2kqlock(kq, kn, 0) && result)
knote_activate(kn);
}
kqunlock(kq);
}
}
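/*
 * knote_attach/knote_detach - hook a knote into (or out of) a klist.
 * knote_attach returns whether the list was empty beforehand, and
 * knote_detach whether it is empty afterwards, so the owner can manage
 * its event-source registration.
 */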
int
knote_attach(struct klist *list, struct knote *kn)
{
int ret = SLIST_EMPTY(list);
SLIST_INSERT_HEAD(list, kn, kn_selnext);
return (ret);
}
int
knote_detach(struct klist *list, struct knote *kn)
{
SLIST_REMOVE(list, kn, knote, kn_selnext);
return (SLIST_EMPTY(list));
}
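/*
 * knote_vanish - the object backing a klist is being revoked
 *
 * Knotes that requested EV_VANISHED semantics are marked vanished and
 * activated; the rest receive a NOTE_REVOKE event so their filters can
 * react.
 */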
void
knote_vanish(struct klist *list)
{
struct knote *kn;
struct knote *kn_next;
SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
struct kqueue *kq = knote_get_kq(kn);
int result;
kqlock(kq);
if ((kn->kn_status & KN_DROPPING) == 0) {
if (kn->kn_status & KN_REQVANISH) {
kn->kn_status |= KN_VANISHED;
knote_activate(kn);
} else if (kqlock2knoteuse(kq, kn)) {
result = knote_fops(kn)->f_event(kn, NOTE_REVOKE);
if (knoteuse2kqlock(kq, kn, 0) && result)
knote_activate(kn);
}
}
kqunlock(kq);
}
}
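/*
 * knote_link_waitq/knote_unlink_waitq - tie an external wait queue to
 * the kqueue's wait queue set so preposts keep a stay-active knote
 * hot, and undo that linkage. The caller supplies the reserved link
 * used by waitq_link.
 */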
int
knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link)
{
struct kqueue *kq = knote_get_kq(kn);
kern_return_t kr;
kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link);
if (kr == KERN_SUCCESS) {
knote_markstayactive(kn);
return (0);
} else {
return (EINVAL);
}
}
int
knote_unlink_waitq(struct knote *kn, struct waitq *wq)
{
struct kqueue *kq = knote_get_kq(kn);
kern_return_t kr;
kr = waitq_unlink(wq, &kq->kq_wqs);
knote_clearstayactive(kn);
return ((kr != KERN_SUCCESS) ? EINVAL : 0);
}
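/*
 * knote_fdclose - revoke the knotes associated with a closing descriptor
 *
 * Entered with the proc fd lock held. When not forced, knotes that
 * requested EV_VANISHED semantics are detached and activated with
 * KN_VANISHED so userspace learns the descriptor went away; all others
 * are dropped outright. The fd lock is dropped and retaken around each
 * detach or drop, so the list walk restarts every iteration.
 */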
void
knote_fdclose(struct proc *p, int fd, int force)
{
struct klist *list;
struct knote *kn;
restart:
list = &p->p_fd->fd_knlist[fd];
SLIST_FOREACH(kn, list, kn_link) {
struct kqueue *kq = knote_get_kq(kn);
kqlock(kq);
if (kq->kq_p != p)
panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
__func__, kq->kq_p, p);
if (!force && (kn->kn_status & KN_REQVANISH)) {
if ((kn->kn_status & KN_VANISHED) == 0) {
proc_fdunlock(p);
if (kqlock2knotedetach(kq, kn)) {
knote_fops(kn)->f_detach(kn);
if (knote_fops(kn)->f_isfd)
fp_drop(p, kn->kn_id, kn->kn_fp, 0);
if (knoteuse2kqlock(kq, kn, 0)) {
knote_activate(kn);
}
kqunlock(kq);
}
proc_fdlock(p);
goto restart;
} else {
kqunlock(kq);
continue;
}
}
proc_fdunlock(p);
if (kqlock2knotedrop(kq, kn)) {
knote_drop(kn, p);
}
proc_fdlock(p);
goto restart;
}
}
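/*
 * knote_fdadd - insert a knote into the process's descriptor table
 *
 * fd-based knotes hang off a per-descriptor list that grows in KQEXTENT
 * increments (bounded by RLIMIT_NOFILE and maxfiles); all other knotes
 * hash by ident into fd_knhash. Called with the proc fd lock held.
 */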
static int
knote_fdadd(struct knote *kn, struct proc *p)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
if (! knote_fops(kn)->f_isfd) {
if (fdp->fd_knhashmask == 0)
fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE,
&fdp->fd_knhashmask);
list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
} else {
if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
u_int size = 0;
if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur
|| kn->kn_id >= (uint64_t)maxfiles)
return (EINVAL);
size = fdp->fd_knlistsize;
while (size <= kn->kn_id)
size += KQEXTENT;
if (size >= (UINT_MAX/sizeof(struct klist *)))
return (EINVAL);
MALLOC(list, struct klist *,
size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
if (list == NULL)
return (ENOMEM);
bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
fdp->fd_knlistsize * sizeof(struct klist *));
bzero((caddr_t)list +
fdp->fd_knlistsize * sizeof(struct klist *),
(size - fdp->fd_knlistsize) * sizeof(struct klist *));
FREE(fdp->fd_knlist, M_KQUEUE);
fdp->fd_knlist = list;
fdp->fd_knlistsize = size;
}
list = &fdp->fd_knlist[kn->kn_id];
}
SLIST_INSERT_HEAD(list, kn, kn_link);
return (0);
}
static void
knote_fdremove(struct knote *kn, struct proc *p)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
if (knote_fops(kn)->f_isfd) {
assert((u_int)fdp->fd_knlistsize > kn->kn_id);
list = &fdp->fd_knlist[kn->kn_id];
} else {
list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
}
SLIST_REMOVE(list, kn, knote, kn_link);
}
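/*
 * knote_fdfind - look up an existing knote matching a kevent
 *
 * A match requires the same kqueue, ident, and filter. EV_UDATA_SPECIFIC
 * registrations additionally match on udata, and only against other
 * udata-specific knotes. Called with the proc fd lock held.
 */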
static struct knote *
knote_fdfind(struct kqueue *kq,
struct kevent_internal_s *kev,
struct proc *p)
{
struct filedesc *fdp = p->p_fd;
struct klist *list = NULL;
struct knote *kn = NULL;
struct filterops *fops;
fops = sysfilt_ops[~kev->filter];
if (fops->f_isfd) {
if (kev->ident < (u_int)fdp->fd_knlistsize) {
list = &fdp->fd_knlist[kev->ident];
}
} else if (fdp->fd_knhashmask != 0) {
list = &fdp->fd_knhash[KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
}
if (list != NULL) {
SLIST_FOREACH(kn, list, kn_link) {
if (kq == knote_get_kq(kn) &&
kev->ident == kn->kn_id &&
kev->filter == kn->kn_filter) {
if (kev->flags & EV_UDATA_SPECIFIC) {
if ((kn->kn_status & KN_UDATA_SPECIFIC) &&
kev->udata == kn->kn_udata) {
break;
}
} else if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
break;
}
}
}
}
return kn;
}
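/*
 * knote_drop - detach and free a knote being dropped
 *
 * Detaches the filter if still attached, unhooks the knote from the fd
 * table, wakes any thread waiting on the knote, releases the fileproc
 * reference of fd-based knotes that have not vanished, and frees the
 * knote.
 */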
static void
knote_drop(struct knote *kn, __unused struct proc *ctxp)
{
struct kqueue *kq = knote_get_kq(kn);
struct proc *p = kq->kq_p;
int needswakeup;
assert(kn->kn_status & KN_DROPPING);
if (kn->kn_status & KN_ATTACHED) {
knote_fops(kn)->f_detach(kn);
}
proc_fdlock(p);
knote_fdremove(kn, p);
kqlock(kq);
proc_fdunlock(p);
assert((kn->kn_status & (KN_SUPPRESSED | KN_QUEUED)) == 0);
needswakeup = (kn->kn_status & KN_USEWAIT);
kqunlock(kq);
if (needswakeup)
waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
CAST_EVENT64_T(&kn->kn_status),
THREAD_RESTART,
WAITQ_ALL_PRIORITIES);
if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0))
fp_drop(p, kn->kn_id, kn->kn_fp, 0);
knote_free(kn);
}
static void
knote_activate(struct knote *kn)
{
if (kn->kn_status & KN_ACTIVE)
return;
kn->kn_status |= KN_ACTIVE;
if (knote_enqueue(kn))
knote_wakeup(kn);
}
static void
knote_deactivate(struct knote *kn)
{
kn->kn_status &= ~KN_ACTIVE;
if ((kn->kn_status & KN_STAYACTIVE) == 0)
knote_dequeue(kn);
}
static void
knote_enable(struct knote *kn)
{
if ((kn->kn_status & KN_DISABLED) == 0)
return;
kn->kn_status &= ~KN_DISABLED;
if (knote_enqueue(kn))
knote_wakeup(kn);
}
static void
knote_disable(struct knote *kn)
{
if (kn->kn_status & KN_DISABLED)
return;
kn->kn_status |= KN_DISABLED;
knote_dequeue(kn);
}
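/*
 * knote_suppress/knote_unsuppress - park a knote on the suppression
 * queue while its event is being processed, then restore it, picking
 * up any QoS index change deferred while suppressed and re-queueing
 * it if still active.
 */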
static void
knote_suppress(struct knote *kn)
{
struct kqtailq *suppressq;
if (kn->kn_status & KN_SUPPRESSED)
return;
knote_dequeue(kn);
kn->kn_status |= KN_SUPPRESSED;
suppressq = knote_get_suppressed_queue(kn);
TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
}
static void
knote_unsuppress(struct knote *kn)
{
struct kqtailq *suppressq;
if ((kn->kn_status & KN_SUPPRESSED) == 0)
return;
kn->kn_status &= ~KN_SUPPRESSED;
suppressq = knote_get_suppressed_queue(kn);
TAILQ_REMOVE(suppressq, kn, kn_tqe);
kn->kn_qos_index = kn->kn_req_index;
if (knote_enqueue(kn) &&
(kn->kn_status & KN_ACTIVE))
knote_wakeup(kn);
}
static int
knote_enqueue(struct knote *kn)
{
if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 ||
(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)))
return 0;
if ((kn->kn_status & KN_QUEUED) == 0) {
struct kqtailq *queue = knote_get_queue(kn);
struct kqueue *kq = knote_get_kq(kn);
TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
kn->kn_status |= KN_QUEUED;
kq->kq_count++;
return 1;
}
return ((kn->kn_status & KN_STAYACTIVE) != 0);
}
static void
knote_dequeue(struct knote *kn)
{
struct kqueue *kq = knote_get_kq(kn);
struct kqtailq *queue;
if ((kn->kn_status & KN_QUEUED) == 0)
return;
queue = knote_get_queue(kn);
TAILQ_REMOVE(queue, kn, kn_tqe);
kn->kn_status &= ~KN_QUEUED;
kq->kq_count--;
}
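/*
 * knote_init - set up the zones and locks used by the kqueue subsystem.
 */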
void
knote_init(void)
{
knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
8192, "knote zone");
kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile),
8192, "kqueue file zone");
kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq),
8192, "kqueue workq zone");
kq_lck_grp_attr = lck_grp_attr_alloc_init();
kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
kq_lck_attr = lck_attr_alloc_init();
lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
lck_spin_init(&_filt_userlock, kq_lck_grp, kq_lck_attr);
#if CONFIG_MEMORYSTATUS
memorystatus_kevent_init(kq_lck_grp, kq_lck_attr);
#endif
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
struct filterops *
knote_fops(struct knote *kn)
{
return sysfilt_ops[kn->kn_filtid];
}
static struct knote *
knote_alloc(void)
{
return ((struct knote *)zalloc(knote_zone));
}
static void
knote_free(struct knote *kn)
{
zfree(knote_zone, kn);
}
#if SOCKETS
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
static lck_grp_attr_t *kev_lck_grp_attr;
static lck_attr_t *kev_lck_attr;
static lck_grp_t *kev_lck_grp;
static decl_lck_rw_data(,kev_lck_data);
static lck_rw_t *kev_rwlock = &kev_lck_data;
static int kev_attach(struct socket *so, int proto, struct proc *p);
static int kev_detach(struct socket *so);
static int kev_control(struct socket *so, u_long cmd, caddr_t data,
struct ifnet *ifp, struct proc *p);
static lck_mtx_t * event_getlock(struct socket *, int);
static int event_lock(struct socket *, int, void *);
static int event_unlock(struct socket *, int, void *);
static int event_sofreelastref(struct socket *);
static void kev_delete(struct kern_event_pcb *);
static struct pr_usrreqs event_usrreqs = {
.pru_attach = kev_attach,
.pru_control = kev_control,
.pru_detach = kev_detach,
.pru_soreceive = soreceive,
};
static struct protosw eventsw[] = {
{
.pr_type = SOCK_RAW,
.pr_protocol = SYSPROTO_EVENT,
.pr_flags = PR_ATOMIC,
.pr_usrreqs = &event_usrreqs,
.pr_lock = event_lock,
.pr_unlock = event_unlock,
.pr_getlock = event_getlock,
}
};
__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
struct kevtstat kevtstat;
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
kevt_getstat, "S,kevtstat", "");
SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
kevt_pcblist, "S,xkevtpcb", "");
static lck_mtx_t *
event_getlock(struct socket *so, int locktype)
{
#pragma unused(locktype)
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
if (so->so_pcb != NULL) {
if (so->so_usecount < 0)
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
} else {
panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
so, solockhistory_nr(so));
}
return (&ev_pcb->evp_mtx);
}
static int
event_lock(struct socket *so, int refcount, void *lr)
{
void *lr_saved;
if (lr == NULL)
lr_saved = __builtin_return_address(0);
else
lr_saved = lr;
if (so->so_pcb != NULL) {
lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
} else {
panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
so, lr_saved, solockhistory_nr(so));
}
if (so->so_usecount < 0) {
panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
so, so->so_pcb, lr_saved, so->so_usecount,
solockhistory_nr(so));
}
if (refcount)
so->so_usecount++;
so->lock_lr[so->next_lock_lr] = lr_saved;
so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
}
static int
event_unlock(struct socket *so, int refcount, void *lr)
{
void *lr_saved;
lck_mtx_t *mutex_held;
if (lr == NULL)
lr_saved = __builtin_return_address(0);
else
lr_saved = lr;
if (refcount) {
VERIFY(so->so_usecount > 0);
so->so_usecount--;
}
if (so->so_usecount < 0) {
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
}
if (so->so_pcb == NULL) {
panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
so, so->so_usecount, (void *)lr_saved,
solockhistory_nr(so));
}
mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
if (so->so_usecount == 0) {
VERIFY(so->so_flags & SOF_PCBCLEARING);
event_sofreelastref(so);
} else {
lck_mtx_unlock(mutex_held);
}
return (0);
}
static int
event_sofreelastref(struct socket *so)
{
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
so->so_pcb = NULL;
so->so_rcv.sb_flags &= ~SB_UPCALL;
so->so_snd.sb_flags &= ~SB_UPCALL;
so->so_event = sonullevent;
lck_mtx_unlock(&(ev_pcb->evp_mtx));
lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
lck_rw_lock_exclusive(kev_rwlock);
LIST_REMOVE(ev_pcb, evp_link);
kevtstat.kes_pcbcount--;
kevtstat.kes_gencnt++;
lck_rw_done(kev_rwlock);
kev_delete(ev_pcb);
sofreelastref(so, 1);
return (0);
}
static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
static
struct kern_event_head kern_event_head;
static u_int32_t static_event_id = 0;
#define EVPCB_ZONE_MAX 65536
#define EVPCB_ZONE_NAME "kerneventpcb"
static struct zone *ev_pcb_zone;
void
kern_event_init(struct domain *dp)
{
struct protosw *pr;
int i;
VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
VERIFY(dp == systemdomain);
kev_lck_grp_attr = lck_grp_attr_alloc_init();
if (kev_lck_grp_attr == NULL) {
panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
}
kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol",
kev_lck_grp_attr);
if (kev_lck_grp == NULL) {
panic("%s: lck_grp_alloc_init failed\n", __func__);
}
kev_lck_attr = lck_attr_alloc_init();
if (kev_lck_attr == NULL) {
panic("%s: lck_attr_alloc_init failed\n", __func__);
}
/*
 * kev_rwlock aliases statically allocated storage and lck_rw_init
 * returns void, so there is no failure case to check here.
 */
lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr);
for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++)
net_add_proto(pr, dp, 1);
ev_pcb_zone = zinit(sizeof(struct kern_event_pcb),
EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME);
if (ev_pcb_zone == NULL) {
panic("%s: failed allocating ev_pcb_zone", __func__);
}
zone_change(ev_pcb_zone, Z_EXPAND, TRUE);
zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE);
}
static int
kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
int error = 0;
struct kern_event_pcb *ev_pcb;
error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
if (error != 0)
return (error);
if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) {
return (ENOBUFS);
}
bzero(ev_pcb, sizeof(struct kern_event_pcb));
lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr);
ev_pcb->evp_socket = so;
ev_pcb->evp_vendor_code_filter = 0xffffffff;
so->so_pcb = (caddr_t) ev_pcb;
lck_rw_lock_exclusive(kev_rwlock);
LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
kevtstat.kes_pcbcount++;
kevtstat.kes_gencnt++;
lck_rw_done(kev_rwlock);
return (error);
}
static void
kev_delete(struct kern_event_pcb *ev_pcb)
{
VERIFY(ev_pcb != NULL);
lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp);
zfree(ev_pcb_zone, ev_pcb);
}
static int
kev_detach(struct socket *so)
{
struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
if (ev_pcb != NULL) {
soisdisconnected(so);
so->so_flags |= SOF_PCBCLEARING;
}
return (0);
}
errno_t kev_vendor_code_find(
const char *string,
u_int32_t *out_vendor_code)
{
if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
return (EINVAL);
}
return (net_str_id_find_internal(string, out_vendor_code,
NSI_VENDOR_CODE, 1));
}
errno_t
kev_msg_post(struct kev_msg *event_msg)
{
mbuf_tag_id_t min_vendor, max_vendor;
net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
if (event_msg == NULL)
return (EINVAL);
if (event_msg->vendor_code < min_vendor ||
event_msg->vendor_code > max_vendor) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor);
return (EINVAL);
}
return (kev_post_msg(event_msg));
}
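/*
 * kev_post_msg - broadcast a kernel event to every listening socket
 *
 * Packs the header and up to five data vectors into one mbuf, then
 * appends a copy to each event PCB whose vendor/class/subclass filters
 * accept it. Returns EMSGSIZE if the message exceeds MLEN and ENOMEM
 * on allocation failure. A typical caller (sketch, not from this file)
 * fills a kev_msg and posts it:
 *
 *	struct kev_msg ev_msg;
 *	bzero(&ev_msg, sizeof (ev_msg));
 *	ev_msg.vendor_code = KEV_VENDOR_APPLE;
 *	ev_msg.kev_class = KEV_NETWORK_CLASS;
 *	ev_msg.kev_subclass = KEV_DL_SUBCLASS;
 *	ev_msg.event_code = event;
 *	ev_msg.dv[0].data_ptr = &event_data;
 *	ev_msg.dv[0].data_length = sizeof (event_data);
 *	kev_post_msg(&ev_msg);
 */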
int
kev_post_msg(struct kev_msg *event_msg)
{
struct mbuf *m, *m2;
struct kern_event_pcb *ev_pcb;
struct kern_event_msg *ev;
char *tmp;
u_int32_t total_size;
int i;
total_size = KEV_MSG_HEADER_SIZE;
for (i = 0; i < 5; i++) {
if (event_msg->dv[i].data_length == 0)
break;
total_size += event_msg->dv[i].data_length;
}
if (total_size > MLEN) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig);
return (EMSGSIZE);
}
m = m_get(M_DONTWAIT, MT_DATA);
if (m == 0) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
return (ENOMEM);
}
ev = mtod(m, struct kern_event_msg *);
total_size = KEV_MSG_HEADER_SIZE;
tmp = (char *) &ev->event_data[0];
for (i = 0; i < 5; i++) {
if (event_msg->dv[i].data_length == 0)
break;
total_size += event_msg->dv[i].data_length;
bcopy(event_msg->dv[i].data_ptr, tmp,
event_msg->dv[i].data_length);
tmp += event_msg->dv[i].data_length;
}
ev->id = ++static_event_id;
ev->total_size = total_size;
ev->vendor_code = event_msg->vendor_code;
ev->kev_class = event_msg->kev_class;
ev->kev_subclass = event_msg->kev_subclass;
ev->event_code = event_msg->event_code;
m->m_len = total_size;
lck_rw_lock_shared(kev_rwlock);
for (ev_pcb = LIST_FIRST(&kern_event_head);
ev_pcb;
ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
lck_mtx_lock(&ev_pcb->evp_mtx);
if (ev_pcb->evp_socket->so_pcb == NULL) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
if (ev_pcb->evp_class_filter != ev->kev_class) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
if ((ev_pcb->evp_subclass_filter !=
KEV_ANY_SUBCLASS) &&
(ev_pcb->evp_subclass_filter !=
ev->kev_subclass)) {
lck_mtx_unlock(&ev_pcb->evp_mtx);
continue;
}
}
}
m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
if (m2 == 0) {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem);
m_free(m);
lck_mtx_unlock(&ev_pcb->evp_mtx);
lck_rw_done(kev_rwlock);
return (ENOMEM);
}
if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
so_inc_recv_data_stat(ev_pcb->evp_socket,
1, m->m_len, MBUF_TC_BE);
sorwakeup(ev_pcb->evp_socket);
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_posted);
} else {
OSIncrementAtomic64((SInt64 *)&kevtstat.kes_fullsock);
}
lck_mtx_unlock(&ev_pcb->evp_mtx);
}
m_free(m);
lck_rw_done(kev_rwlock);
return (0);
}
static int
kev_control(struct socket *so,
u_long cmd,
caddr_t data,
__unused struct ifnet *ifp,
__unused struct proc *p)
{
struct kev_request *kev_req = (struct kev_request *) data;
struct kern_event_pcb *ev_pcb;
struct kev_vendor_code *kev_vendor;
u_int32_t *id_value = (u_int32_t *) data;
switch (cmd) {
case SIOCGKEVID:
*id_value = static_event_id;
break;
case SIOCSKEVFILT:
ev_pcb = (struct kern_event_pcb *) so->so_pcb;
ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
ev_pcb->evp_class_filter = kev_req->kev_class;
ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
break;
case SIOCGKEVFILT:
ev_pcb = (struct kern_event_pcb *) so->so_pcb;
kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
kev_req->kev_class = ev_pcb->evp_class_filter;
kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
break;
case SIOCGKEVVENDOR:
kev_vendor = (struct kev_vendor_code *)data;
kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0;
return (net_str_id_find_internal(kev_vendor->vendor_string,
&kev_vendor->vendor_code, NSI_VENDOR_CODE, 0));
default:
return (ENOTSUP);
}
return (0);
}
int
kevt_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
lck_rw_lock_shared(kev_rwlock);
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
goto done;
}
if (req->oldptr == USER_ADDR_NULL) {
req->oldidx = sizeof(struct kevtstat);
goto done;
}
error = SYSCTL_OUT(req, &kevtstat,
MIN(sizeof(struct kevtstat), req->oldlen));
done:
lck_rw_done(kev_rwlock);
return (error);
}
__private_extern__ int
kevt_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
int n, i;
struct xsystmgen xsg;
void *buf = NULL;
size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) +
ROUNDUP64(sizeof (struct xsocket_n)) +
2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
ROUNDUP64(sizeof (struct xsockstat_n));
struct kern_event_pcb *ev_pcb;
buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
if (buf == NULL)
return (ENOMEM);
lck_rw_lock_shared(kev_rwlock);
n = kevtstat.kes_pcbcount;
if (req->oldptr == USER_ADDR_NULL) {
req->oldidx = (n + n/8) * item_size;
goto done;
}
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
goto done;
}
bzero(&xsg, sizeof (xsg));
xsg.xg_len = sizeof (xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
if (error) {
goto done;
}
if (n == 0) {
goto done;
}
for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
i < n && ev_pcb != NULL;
i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
struct xkevtpcb *xk = (struct xkevtpcb *)buf;
struct xsocket_n *xso = (struct xsocket_n *)
ADVANCE64(xk, sizeof (*xk));
struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
ADVANCE64(xso, sizeof (*xso));
struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
ADVANCE64(xsbrcv, sizeof (*xsbrcv));
struct xsockstat_n *xsostats = (struct xsockstat_n *)
ADVANCE64(xsbsnd, sizeof (*xsbsnd));
bzero(buf, item_size);
lck_mtx_lock(&ev_pcb->evp_mtx);
xk->kep_len = sizeof(struct xkevtpcb);
xk->kep_kind = XSO_EVT;
xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
xk->kep_class_filter = ev_pcb->evp_class_filter;
xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
sotoxsocket_n(ev_pcb->evp_socket, xso);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
&ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
sbtoxsockbuf_n(ev_pcb->evp_socket ?
&ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
lck_mtx_unlock(&ev_pcb->evp_mtx);
error = SYSCTL_OUT(req, buf, item_size);
}
if (error == 0) {
bzero(&xsg, sizeof (xsg));
xsg.xg_len = sizeof (xsg);
xsg.xg_count = n;
xsg.xg_gen = kevtstat.kes_gencnt;
xsg.xg_sogen = so_gencnt;
error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
if (error) {
goto done;
}
}
done:
lck_rw_done(kev_rwlock);
return (error);
}
#endif
int
fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
{
struct vinfo_stat * st;
st = &kinfo->kq_stat;
st->vst_size = kq->kq_count;
if (kq->kq_state & KQ_KEV_QOS)
st->vst_blksize = sizeof(struct kevent_qos_s);
else if (kq->kq_state & KQ_KEV64)
st->vst_blksize = sizeof(struct kevent64_s);
else
st->vst_blksize = sizeof(struct kevent);
st->vst_mode = S_IFIFO;
#define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS)
kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK;
return (0);
}
void
knote_markstayactive(struct knote *kn)
{
kqlock(knote_get_kq(kn));
kn->kn_status |= KN_STAYACTIVE;
if (knote_get_kq(kn)->kq_state & KQ_WORKQ)
knote_set_qos_index(kn, KQWQ_QOS_MANAGER);
knote_activate(kn);
kqunlock(knote_get_kq(kn));
}
void
knote_clearstayactive(struct knote *kn)
{
kqlock(knote_get_kq(kn));
kn->kn_status &= ~KN_STAYACTIVE;
knote_deactivate(kn);
kqunlock(knote_get_kq(kn));
}
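/*
 * kevent_extinfo_emit - snapshot one knote chain into the info buffer
 *
 * Appends an entry for every knote on the chain that belongs to kq, up
 * to buflen entries; the running count keeps climbing past the buffer
 * so callers can report the true number of knotes.
 */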
static unsigned long
kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
unsigned long buflen, unsigned long nknotes)
{
struct kevent_internal_s *kevp;
for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
if (kq == knote_get_kq(kn)) {
if (nknotes < buflen) {
struct kevent_extinfo *info = &buf[nknotes];
struct kevent_qos_s kevqos;
kqlock(kq);
kevp = &(kn->kn_kevent);
bzero(&kevqos, sizeof(kevqos));
kevqos.ident = kevp->ident;
kevqos.filter = kevp->filter;
kevqos.flags = kevp->flags;
kevqos.fflags = kevp->fflags;
kevqos.data = (int64_t) kevp->data;
kevqos.udata = kevp->udata;
kevqos.ext[0] = kevp->ext[0];
kevqos.ext[1] = kevp->ext[1];
memcpy(&info->kqext_kev, &kevqos, sizeof(info->kqext_kev));
info->kqext_sdata = kn->kn_sdata;
info->kqext_status = kn->kn_status;
info->kqext_sfflags = kn->kn_sfflags;
kqunlock(kq);
}
nknotes++;
}
}
return nknotes;
}
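/*
 * pid_kqueue_extinfo - proc_info support: copy out kevent_extinfo records
 *
 * Snapshots every knote registered against kq in the target process's
 * fd lists and hash, up to PROC_PIDFDKQUEUE_KNOTES_MAX entries, and
 * copies them out to the user buffer; *retval is the clamped count.
 */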
int
pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
uint32_t bufsize, int32_t *retval)
{
struct knote *kn;
int i;
int err = 0;
struct filedesc *fdp = p->p_fd;
unsigned long nknotes = 0;
unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
struct kevent_extinfo *kqext = NULL;
buflen = min(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
kqext = kalloc(buflen * sizeof(struct kevent_extinfo));
if (kqext == NULL) {
err = ENOMEM;
goto out;
}
bzero(kqext, buflen * sizeof(struct kevent_extinfo));
proc_fdlock(p);
for (i = 0; i < fdp->fd_knlistsize; i++) {
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
}
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
}
}
proc_fdunlock(p);
assert(bufsize >= sizeof(struct kevent_extinfo) * min(buflen, nknotes));
err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * min(buflen, nknotes));
out:
if (kqext) {
kfree(kqext, buflen * sizeof(struct kevent_extinfo));
kqext = NULL;
}
if (!err) {
*retval = min(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
}
return err;
}
static unsigned long
kevent_udatainfo_emit(struct kqueue *kq, struct knote *kn, uint64_t *buf,
unsigned long buflen, unsigned long nknotes)
{
struct kevent_internal_s *kevp;
for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
if (kq == knote_get_kq(kn)) {
if (nknotes < buflen) {
kqlock(kq);
kevp = &(kn->kn_kevent);
buf[nknotes] = kevp->udata;
kqunlock(kq);
}
nknotes++;
}
}
return nknotes;
}
int
pid_kqueue_udatainfo(proc_t p, struct kqueue *kq, uint64_t *buf,
uint32_t bufsize)
{
struct knote *kn;
int i;
struct filedesc *fdp = p->p_fd;
unsigned long nknotes = 0;
unsigned long buflen = bufsize / sizeof(uint64_t);
proc_fdlock(p);
for (i = 0; i < fdp->fd_knlistsize; i++) {
kn = SLIST_FIRST(&fdp->fd_knlist[i]);
nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
}
if (fdp->fd_knhashmask != 0) {
for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
kn = SLIST_FIRST(&fdp->fd_knhash[i]);
nknotes = kevent_udatainfo_emit(kq, kn, buf, buflen, nknotes);
}
}
proc_fdunlock(p);
return (int)nknotes;
}