#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <kern/kern_types.h>
#include <vm/vm_map.h>
#include <libkern/section_keywords.h>
/*
 *	Routine:	ipc_pset_alloc
 *	Purpose:
 *		Allocate a port set in the given space, with a
 *		freshly assigned name, and initialize its message
 *		queue as a set queue.
 *	Conditions:
 *		Nothing locked on entry.
 *	Returns:
 *		KERN_SUCCESS	allocated; *namep and *psetp valid.
 *		Other values	forwarded from ipc_object_alloc.
 */
kern_return_t
ipc_pset_alloc(
	ipc_space_t	space,
	mach_port_name_t	*namep,
	ipc_pset_t	*psetp)
{
	ipc_pset_t pset;
	mach_port_name_t name;
	kern_return_t kr;
	uint64_t reserved_link;

	/*
	 * Reserve the waitq link before allocating, so ipc_mqueue_init
	 * below never has to allocate while locks are held.
	 */
	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc(space, IOT_PORT_SET,
	    MACH_PORT_TYPE_PORT_SET, 0,
	    &name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		/* Return the unused link reservation. */
		waitq_link_release(reserved_link);
		return kr;
	}

	/*
	 * The is_write_unlock() below implies ipc_object_alloc returned
	 * with the space still write-locked; the mqueue is initialized
	 * (TRUE => as a port-set queue) before that lock is dropped.
	 */
	ipc_mqueue_init(&pset->ips_messages, TRUE , &reserved_link);
	is_write_unlock(space);

	/*
	 * NOTE(review): presumably waitq_link_release is a no-op if
	 * ipc_mqueue_init consumed the reservation — confirm against
	 * the waitq implementation.
	 */
	waitq_link_release(reserved_link);

	*namep = name;
	/* No unlock of pset here — apparently returned locked to the caller. */
	*psetp = pset;
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_pset_alloc_name
 *	Purpose:
 *		Allocate a port set in the given space, under a
 *		specific name chosen by the caller.
 *	Conditions:
 *		Nothing locked on entry.  Unlike ipc_pset_alloc, no
 *		is_write_unlock() is done here — apparently
 *		ipc_object_alloc_name releases the space lock itself;
 *		confirm against its definition.
 *	Returns:
 *		KERN_SUCCESS	allocated; *psetp valid.
 *		Other values	forwarded from ipc_object_alloc_name.
 */
kern_return_t
ipc_pset_alloc_name(
	ipc_space_t	space,
	mach_port_name_t	name,
	ipc_pset_t	*psetp)
{
	ipc_pset_t pset;
	kern_return_t kr;
	uint64_t reserved_link;

	/* Reserve the waitq link up front (see ipc_pset_alloc). */
	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc_name(space, IOT_PORT_SET,
	    MACH_PORT_TYPE_PORT_SET, 0,
	    name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		/* Return the unused link reservation. */
		waitq_link_release(reserved_link);
		return kr;
	}

	/* TRUE => initialize as a port-set (multiplexing) queue. */
	ipc_mqueue_init(&pset->ips_messages, TRUE , &reserved_link);
	waitq_link_release(reserved_link);

	/* No unlock of pset here — apparently returned locked to the caller. */
	*psetp = pset;
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_pset_alloc_special
 *	Purpose:
 *		Allocate a port set in a special space: the set has
 *		no name/entry and carries a single reference.  The
 *		space argument is only sanity-checked (it must be a
 *		table-less, inactive space).
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		The new port set, or IPS_NULL on allocation failure.
 */
ipc_pset_t
ipc_pset_alloc_special(
	__assert_only ipc_space_t space)
{
	ipc_pset_t pset;
	uint64_t reserved_link;

	assert(space != IS_NULL);
	assert(space->is_table == IE_NULL);
	assert(!is_active(space));

	/* Reserve the waitq link before allocating (may block). */
	reserved_link = waitq_link_reserve(NULL);

	__IGNORE_WCASTALIGN(pset = (ipc_pset_t)io_alloc(IOT_PORT_SET));
	if (pset == IPS_NULL) {
		waitq_link_release(reserved_link);
		return IPS_NULL;
	}

	/* Manually build the object state ipc_object_alloc would set up. */
	bzero((char *)pset, sizeof(*pset));
	io_lock_init(&pset->ips_object);
	pset->ips_references = 1;	/* the reference returned to the caller */
	/* TRUE => mark the object active. */
	pset->ips_object.io_bits = io_makebits(TRUE, IOT_PORT_SET, 0);

	/* TRUE => initialize as a port-set (multiplexing) queue. */
	ipc_mqueue_init(&pset->ips_messages, TRUE , &reserved_link);
	waitq_link_release(reserved_link);

	return pset;
}
/*
 *	Routine:	ipc_pset_member
 *	Purpose:
 *		Report whether a port currently belongs to a port set.
 *	Conditions:
 *		The port must be active (asserted).  Locking is the
 *		caller's responsibility — presumably both objects are
 *		locked; confirm with callers.
 */
boolean_t
ipc_pset_member(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	assert(ip_active(port));

	return ipc_mqueue_member(&port->ip_messages, &pset->ips_messages);
}
/*
 *	Routine:	ipc_pset_add
 *	Purpose:
 *		Insert a port into a port set, linking their message
 *		queues.  The pre-reserved waitq link and prepost
 *		handles are passed through to ipc_mqueue_add.
 *	Conditions:
 *		Both the pset and the port must be active (asserted).
 *	Returns:
 *		Whatever ipc_mqueue_add returns.
 */
kern_return_t
ipc_pset_add(
	ipc_pset_t	pset,
	ipc_port_t	port,
	uint64_t	*reserved_link,
	uint64_t	*reserved_prepost)
{
	assert(ips_active(pset));
	assert(ip_active(port));

	return ipc_mqueue_add(&port->ip_messages, &pset->ips_messages,
	    reserved_link, reserved_prepost);
}
/*
 *	Routine:	ipc_pset_remove
 *	Purpose:
 *		Remove a port from a port set by unlinking their
 *		message queues.
 *	Conditions:
 *		The port must be active (asserted).
 *	Returns:
 *		KERN_NOT_IN_SET	the port belongs to no set at all.
 *		Otherwise, whatever ipc_mqueue_remove returns.
 */
kern_return_t
ipc_pset_remove(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	assert(ip_active(port));

	/* Fast path: the port is a member of no set. */
	if (port->ip_in_pset == 0) {
		return KERN_NOT_IN_SET;
	}

	return ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages);
}
/*
 *	Routine:	ipc_pset_remove_from_all
 *	Purpose:
 *		Remove a port from every port set it belongs to.
 *	Returns:
 *		KERN_NOT_IN_SET	the port belongs to no set.
 *		KERN_SUCCESS	removed from all sets.
 */
kern_return_t
ipc_pset_remove_from_all(
	ipc_port_t	port)
{
	/* Nothing to do if the port is in no set. */
	if (port->ip_in_pset == 0) {
		return KERN_NOT_IN_SET;
	}

	/* Unlink the port's queue from every containing set. */
	ipc_mqueue_remove_from_all(&port->ip_messages);
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroy a port set: deactivate it, evict all member
 *		ports, and wake anyone waiting on the set.
 *	Conditions:
 *		The pset must be active (asserted); the assert pattern
 *		and trailing ips_unlock imply it is locked on entry.
 *		The lock is dropped and one reference is consumed.
 */
void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	assert(ips_active(pset));

	/* Deactivate first so the set can no longer be used. */
	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * Unlink every member port.  NOTE(review): called before the
	 * imq_lock below — presumably ipc_mqueue_remove_all does its
	 * own locking; confirm against its definition.
	 */
	ipc_mqueue_remove_all(&pset->ips_messages);

	/*
	 * Signal the queue's state change under the mqueue lock so
	 * blocked receivers notice the destruction.
	 */
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);

	ipc_mqueue_deinit(&pset->ips_messages);

	ips_unlock(pset);
	ips_release(pset);	/* consumes the caller's reference */
}
#include <sys/event.h>
#include <sys/errno.h>
/*
 * EVFILT_MACHPORT: kevent filter over Mach port and port-set
 * receive queues.  Forward declarations for the filter operations.
 */
static int filt_machportattach(struct knote *kn, struct kevent_internal_s *kev);
static void filt_machportdetach(struct knote *kn);
static int filt_machport(struct knote *kn, long hint);
static int filt_machporttouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_machportprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned filt_machportpeek(struct knote *kn);

/* Filter operations table; mapped read-only after early boot. */
SECURITY_READ_ONLY_EARLY(struct filterops) machport_filtops = {
	.f_adjusts_qos = 1,	/* this filter participates in QoS adjustment */
	.f_attach = filt_machportattach,
	.f_detach = filt_machportdetach,
	.f_event = filt_machport,
	.f_touch = filt_machporttouch,
	.f_process = filt_machportprocess,
	.f_peek = filt_machportpeek,
};
/*
 * Attach a knote to the Mach port or port set named by kn_kevent.ident
 * in the current space.
 *
 * For a port set, the knote is linked into the set's waitq so kqueue
 * can be woken by preposts; for a receive right, the knote is simply
 * attached to the queue's klist and reported active immediately if a
 * message is already queued.  On error, EV_ERROR/kn_data are set and
 * 0 is returned.  A reference on the underlying object is held while
 * attached (dropped in filt_machportdetach).
 */
static int
filt_machportattach(
	struct knote *kn,
	__unused struct kevent_internal_s *kev)
{
	mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
	/* Reserve the waitq link before taking any IPC locks. */
	uint64_t wq_link_id = waitq_link_reserve(NULL);
	ipc_space_t space = current_space();
	ipc_kmsg_t first;
	int error;
	int result = 0;
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_mqueue_t mqueue;

	/* On success, the name was resolved; the space read lock is
	 * held until the branch below releases it. */
	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr == KERN_SUCCESS) {
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
			ipc_pset_t pset;

			__IGNORE_WCASTALIGN(pset = (ipc_pset_t)entry->ie_object);
			mqueue = &pset->ips_messages;
			/* Hold the pset while the knote points at its mqueue. */
			ips_reference(pset);
			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;

			/* Link the kqueue into the set's waitq; consumes (part
			 * of) wq_link_id on success.  NOTE(review): presumably
			 * waitq_link_release below tolerates a consumed id —
			 * confirm waitq semantics. */
			error = knote_link_waitq(kn, &mqueue->imq_wait_queue, &wq_link_id);
			if (!error) {
				KNOTE_ATTACH(&mqueue->imq_klist, kn);
				imq_unlock(mqueue);
			}
			else {
				/* Undo: detach the mqueue pointer and drop the ref. */
				kn->kn_ptr.p_mqueue = IMQ_NULL;
				imq_unlock(mqueue);
				ips_release(pset);
			}

			is_read_unlock(space);
		} else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port;

			__IGNORE_WCASTALIGN(port = (ipc_port_t)entry->ie_object);
			mqueue = &port->ip_messages;
			/* Hold the port while the knote points at its mqueue. */
			ip_reference(port);
			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;
			KNOTE_ATTACH(&mqueue->imq_klist, kn);

			/* If a message is already queued, report the knote as
			 * active right away, propagating the message's QoS when
			 * the caller asked to receive (MACH_RCV_MSG). */
			if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
				int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

				if (kn->kn_sfflags & MACH_RCV_MSG)
					knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
						sync_qos_override_index);
				result = 1;
			}
			imq_unlock(mqueue);

			is_read_unlock(space);
			error = 0;
		} else {
			/* Neither a port set nor a receive right. */
			is_read_unlock(space);
			error = ENOTSUP;
		}
	} else {
		/* Name lookup failed. */
		error = ENOENT;
	}

	/* Return whatever remains of the link reservation. */
	waitq_link_release(wq_link_id);

	/* Errors are reported in-band through the knote. */
	if (error) {
		kn->kn_flags |= EV_ERROR;
		kn->kn_data = error;
		return 0;
	}

	return result;
}
/*
 * Recover the containing object from an embedded message-queue pointer
 * (container_of-style).  mqueue_to_object steps back one ipc_object
 * from the mqueue address — NOTE(review): this assumes ips_messages /
 * ip_messages immediately follow the ipc_object header in both
 * structures; confirm against the struct layouts in the headers.
 */
#define mqueue_to_pset(mq) ((ipc_pset_t)((uintptr_t)mq-offsetof(struct ipc_pset, ips_messages)))
#define mqueue_to_port(mq) ((ipc_port_t)((uintptr_t)mq-offsetof(struct ipc_port, ip_messages)))
#define mqueue_to_object(mq) (((ipc_object_t)(mq)) - 1)
/*
 * Detach a knote from the port/pset message queue it was attached to
 * by filt_machportattach, and drop the object reference taken there.
 */
static void
filt_machportdetach(
	struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);

	imq_lock(mqueue);
	KNOTE_DETACH(&mqueue->imq_klist, kn);
	kn->kn_ptr.p_mqueue = IMQ_NULL;
	imq_unlock(mqueue);

	/*
	 * Only port sets were linked into the kqueue's waitq at attach
	 * time; undo that linkage here.  Failure is deliberately
	 * ignored (best effort on teardown).
	 */
	if (io_otype(object) == IOT_PORT_SET) {
		(void)knote_unlink_waitq(kn, &mqueue->imq_wait_queue);
	}
	io_release(object);	/* reference taken in filt_machportattach */
}
/*
 * Filter event callback: decide whether the knote should fire.
 *
 * NOTE_REVOKE means the queue is going away — deliver EOF once.
 * Otherwise a (non-set) queue fires when a message is pending,
 * propagating that message's QoS when the caller asked to receive.
 * Called with the mqueue lock held (asserted).
 */
static int
filt_machport(
	struct knote *kn,
	long hint)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t kmsg;

	assert(imq_held(mqueue));

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		return 1;
	}

	if (!imq_is_valid(mqueue)) {
		return 0;
	}
	assert(!imq_is_set(mqueue));

	kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	if (kmsg == IKM_NULL) {
		return 0;
	}

	if (kn->kn_sfflags & MACH_RCV_MSG) {
		ipc_port_t port = ip_from_mq(mqueue);
		int sync_qos_override_index =
		    ipc_port_get_max_sync_qos_index(port);

		knote_adjust_qos(kn, kmsg->ikm_qos, kmsg->ikm_qos_override,
		    sync_qos_override_index);
	}
	return 1;
}
/*
 * Refresh a machport knote with new registration parameters from
 * userspace, and report whether it should fire immediately.
 */
static int
filt_machporttouch(
	struct knote *kn,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t kmsg = IKM_NULL;
	int activate = 0;

	imq_lock(mqueue);

	/* Accept the updated parameters. */
	kn->kn_sfflags = kev->fflags;
	kn->kn_ext[0] = kev->ext[0];
	kn->kn_ext[1] = kev->ext[1];
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
		kn->kn_udata = kev->udata;
	}

	/* Peek at the head of a valid, non-set queue. */
	if (imq_is_valid(mqueue) && !imq_is_set(mqueue)) {
		kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	}

	if (kmsg != IKM_NULL) {
		/* A message is pending: fire now, propagating its QoS
		 * when the caller asked to receive. */
		ipc_port_t port = ip_from_mq(mqueue);
		int sync_qos_override_index =
		    ipc_port_get_max_sync_qos_index(port);

		if (kn->kn_sfflags & MACH_RCV_MSG) {
			knote_adjust_qos(kn, kmsg->ikm_qos,
			    kmsg->ikm_qos_override, sync_qos_override_index);
		}
		activate = 1;
	} else if (kn->kn_sfflags & MACH_RCV_MSG) {
		/* No message pending: clear any QoS adjustment. */
		knote_adjust_qos(kn,
		    MACH_MSG_PRIORITY_UNSPECIFIED,
		    MACH_MSG_PRIORITY_UNSPECIFIED,
		    THREAD_QOS_UNSPECIFIED);
	}

	imq_unlock(mqueue);
	return activate;
}
/*
 * Deliver a machport event to userspace, optionally receiving the
 * pending message directly into a caller-supplied (or kevent scratch)
 * buffer, emulating a non-blocking mach_msg receive on this thread.
 *
 * Returns 1 when an event was produced (kev filled in), 0 when there
 * was nothing to deliver (receive timed out / restarted).
 */
static int
filt_machportprocess(
	struct knote *kn,
	struct filt_process_s *process_data,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);
	thread_t self = current_thread();
	boolean_t used_filtprocess_data = FALSE;
	wait_result_t wresult;
	mach_msg_option_t option;
	mach_vm_address_t addr;
	mach_msg_size_t size;

	imq_lock(mqueue);

	/* Snapshot the event to deliver. */
	*kev = kn->kn_kevent;

	/* EOF (set by NOTE_REVOKE in filt_machport) short-circuits:
	 * just deliver the EOF event, no receive attempt. */
	if (kev->flags & EV_EOF) {
		imq_unlock(mqueue);
		return 1;
	}

	/* Only the receive-related option bits come from the knote. */
	option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
		MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);

	if (option & MACH_RCV_MSG) {
		/* Caller-provided receive buffer lives in ext[0]/ext[1]. */
		addr = (mach_vm_address_t) kn->kn_ext[0];
		size = (mach_msg_size_t) kn->kn_ext[1];

		/* With no explicit buffer, fall back to the kevent
		 * scratch area, forcing LARGE semantics so an
		 * undersized buffer reports rather than destroys. */
		if (size == 0 && process_data != NULL) {
			used_filtprocess_data = TRUE;

			addr = (mach_vm_address_t)process_data->fp_data_out;
			size = (mach_msg_size_t)process_data->fp_data_resid;
			option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
			if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
				option |= MACH_RCV_STACK;
		}
	} else {
		/* Not receiving: probe only, report size via TOO_LARGE. */
		option = MACH_RCV_LARGE;
		addr = 0;
		size = 0;
	}

	/* Set up this thread's receive state the way mach_msg would.
	 * The object reference pins it across the receive. */
	io_reference(object);
	self->ith_object = object;
	self->ith_msg_addr = addr;
	self->ith_rsize = size;
	self->ith_msize = 0;
	self->ith_option = option;
	self->ith_receiver_name = MACH_PORT_NULL;
	self->ith_continuation = NULL;
	/* MACH_RCV_TIMEOUT with the 0 timeout below makes the receive
	 * non-blocking: we either get a message now or give up. */
	option |= MACH_RCV_TIMEOUT;
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_knote = kn;

	wresult = ipc_mqueue_receive_on_thread(
			mqueue,
			option,
			size,
			0,	/* immediate timeout (see MACH_RCV_TIMEOUT above) */
			THREAD_INTERRUPTIBLE,
			self);
	/* NOTE(review): no imq_unlock on any path past this point —
	 * presumably ipc_mqueue_receive_on_thread drops the mqueue
	 * lock; confirm against its definition. */

	/* Nothing to receive after all: no event. */
	if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
		io_release(object);
		return 0;
	}

	assert(wresult == THREAD_NOT_WAITING);
	assert(self->ith_state != MACH_RCV_IN_PROGRESS);

	/* Probe-only mode: the receive must have reported TOO_LARGE;
	 * pass the port name of the message source in kev->data. */
	if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
		assert(self->ith_state == MACH_RCV_TOO_LARGE);
		assert(self->ith_kmsg == IKM_NULL);
		kev->data = self->ith_receiver_name;
		io_release(object);
		return 1;
	}

	/* Finish the receive (copyout etc.); size is updated to the
	 * actual message size. */
	kev->fflags = mach_msg_receive_results(&size);

	if (kev->fflags == MACH_RCV_TOO_LARGE) {
		/* Buffer too small: report the required size, and the
		 * sender identity if LARGE_IDENTITY was requested. */
		kev->ext[1] = self->ith_msize;
		if (option & MACH_RCV_LARGE_IDENTITY)
			kev->data = self->ith_receiver_name;
		else
			kev->data = MACH_PORT_NULL;
	} else {
		kev->ext[1] = size;
		kev->data = MACH_PORT_NULL;
	}

	/* Account the consumed portion of the kevent scratch buffer. */
	if (used_filtprocess_data) {
		assert(process_data->fp_data_resid >= size);
		process_data->fp_data_resid -= size;
		if ((process_data->fp_flags & KEVENT_FLAG_STACK_DATA) == 0) {
			/* Buffer fills upward: report where this message
			 * landed and advance the cursor. */
			kev->ext[0] = process_data->fp_data_out;
			process_data->fp_data_out += size;
		} else {
			/* Stack-style buffer fills downward from the end. */
			assert(option & MACH_RCV_STACK);
			kev->ext[0] = process_data->fp_data_out +
				process_data->fp_data_resid;
		}
	}

	/* On success, surface the message QoS: combined with the
	 * knote's QoS in kev->qos, raw values packed into ext[2]. */
	if (kev->fflags == MACH_MSG_SUCCESS) {
		kev->qos = mach_msg_priority_combine(self->ith_qos, kn->kn_qos);
		kev->ext[2] = ((uint64_t)self->ith_qos << 32) |
				(uint64_t)self->ith_qos_override;
	}

	return 1;
}
/*
 * Non-destructively check whether the attached port set has any
 * messages available.
 */
static unsigned
filt_machportpeek(struct knote *kn)
{
	return ipc_mqueue_set_peek(kn->kn_ptr.p_mqueue);
}