#include <sys/work_interval.h>
#include <kern/work_interval.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/machine.h>
#include <kern/thread_group.h>
#include <kern/ipc_kobject.h>
#include <kern/task.h>
#include <mach/kern_return.h>
#include <mach/notify.h>
#include <stdatomic.h>
/*
 * In-kernel representation of a work interval.
 *
 * Reference-counted (wi_retain/wi_release). A thread's
 * th_work_interval pointer holds one reference; when the interval is
 * created WORK_INTERVAL_FLAG_JOINABLE, the kobject port in wi_port
 * holds one as well (dropped from the no-senders notification).
 */
struct work_interval {
uint64_t wi_id; /* unique id, assigned at creation from unique_work_interval_id */
_Atomic uint32_t wi_ref_count; /* see wi_retain()/wi_release() */
uint32_t wi_create_flags; /* wica_create_flags captured at creation (e.g. WORK_INTERVAL_FLAG_JOINABLE) */
ipc_port_t wi_port; /* kobject port when joinable; IP_NULL/MACH_PORT_NULL otherwise or after no-senders */
uint64_t wi_creator_uniqueid; /* task unique id of the creating task */
uint32_t wi_creator_pid; /* pid of the creating task (informational; not checked on notify) */
int wi_creator_pidversion; /* pid version of the creating task, re-checked on notify */
};
/*
 * Take an additional reference on a work interval.
 *
 * The caller must already hold a reference: the previous count is
 * asserted to be non-zero (retaining from zero would be a
 * use-after-free in the making).
 */
static inline void
wi_retain(struct work_interval *work_interval)
{
	uint32_t prev = atomic_fetch_add_explicit(&work_interval->wi_ref_count,
	    1, memory_order_relaxed);

	assert(prev > 0);
}
/*
 * Drop a reference on a work interval, freeing it when the last
 * reference goes away.
 *
 * The decrement uses release ordering so that this thread's prior
 * writes to the object are published before the count can hit zero,
 * and the final releaser issues an acquire fence before kfree() so it
 * observes the writes of every other releasing thread. (The original
 * relaxed decrement permitted the freeing thread to free the object
 * before other threads' updates to it became visible.)
 */
static inline void
wi_release(struct work_interval *work_interval)
{
	uint32_t old_count;
	old_count = atomic_fetch_sub_explicit(&work_interval->wi_ref_count,
	    1, memory_order_release);
	assert(old_count > 0);
	if (old_count == 1) {
		/* Pairs with the release decrements above. */
		atomic_thread_fence(memory_order_acquire);
		kfree(work_interval, sizeof(struct work_interval));
	}
}
/*
 * Allocate the kobject port for a joinable work interval and return a
 * send right to it.
 *
 * The caller's reference on work_interval is effectively donated to
 * the port (the no-senders handler, work_interval_port_notify(),
 * drops it). A no-senders request is armed immediately so that when
 * the last send right dies the interval is torn down.
 *
 * Must be called at most once per interval: asserts wi_port is still
 * IP_NULL.
 */
static ipc_port_t
work_interval_port_alloc(struct work_interval *work_interval)
{
ipc_port_t work_interval_port = ipc_port_alloc_kernel();
if (work_interval_port == IP_NULL)
panic("failed to allocate work interval port");
assert(work_interval->wi_port == IP_NULL);
/* Attach the kobject under the port lock so no sender can observe a
 * half-initialized port. */
ip_lock(work_interval_port);
ipc_kobject_set_atomically(work_interval_port, (ipc_kobject_t)work_interval,
IKOT_WORK_INTERVAL);
/* Arm no-senders while still locked; there must be no prior request. */
ipc_port_t notify_port = ipc_port_make_sonce_locked(work_interval_port);
ipc_port_t old_notify_port = IP_NULL;
/* sync == 1: notify when make-send count drops back to this level.
 * NOTE(review): ipc_port_nsrequest() unlocks the port — confirm. */
ipc_port_nsrequest(work_interval_port, 1, notify_port, &old_notify_port);
assert(old_notify_port == IP_NULL);
ipc_port_t send_port = ipc_port_make_send(work_interval_port);
assert(IP_VALID(send_port));
work_interval->wi_port = work_interval_port;
return send_port;
}
/*
 * Convert a locked port into a +1 reference on its work interval.
 *
 * Returns NULL when the port is invalid, inactive, or not an
 * IKOT_WORK_INTERVAL kobject. On success the interval is returned
 * retained; the caller is responsible for the reference.
 *
 * The caller holds the port lock across the call.
 */
static struct work_interval *
work_interval_port_convert_locked(ipc_port_t port)
{
	if (!IP_VALID(port) || !ip_active(port) ||
	    ip_kotype(port) != IKOT_WORK_INTERVAL) {
		return NULL;
	}

	struct work_interval *wi = (struct work_interval *)port->ip_kobject;
	wi_retain(wi);
	return wi;
}
/*
 * Look up a work interval from a send-right name in the current
 * task's IPC space.
 *
 * On KERN_SUCCESS, *work_interval holds a +1 reference the caller
 * must drop with wi_release() (or hand off). Returns
 * KERN_INVALID_NAME for an invalid name, a translate failure code,
 * or KERN_INVALID_CAPABILITY if the port is not a live work-interval
 * kobject port.
 */
static kern_return_t
port_name_to_work_interval(mach_port_name_t name,
struct work_interval **work_interval)
{
if (!MACH_PORT_VALID(name))
return KERN_INVALID_NAME;
ipc_port_t port = IPC_PORT_NULL;
kern_return_t kr = KERN_SUCCESS;
/* On success, ipc_port_translate_send() returns the port LOCKED;
 * the ip_unlock() below balances it. */
kr = ipc_port_translate_send(current_space(), name, &port);
if (kr != KERN_SUCCESS)
return kr;
assert(IP_VALID(port));
struct work_interval *converted_work_interval;
/* Takes a +1 reference on success; must happen under the port lock
 * so the kobject cannot be torn down underneath us. */
converted_work_interval = work_interval_port_convert_locked(port);
if (converted_work_interval == NULL)
kr = KERN_INVALID_CAPABILITY;
ip_unlock(port);
if (kr == KERN_SUCCESS)
*work_interval = converted_work_interval;
return kr;
}
/*
 * No-senders notification handler for work-interval kobject ports.
 *
 * Fired when the last send right to the port dies. Validates the
 * notification against the port's state (any mismatch indicates IPC
 * state corruption and panics), detaches the kobject, destroys the
 * port, and drops the reference the port held on the work interval
 * (the one donated in work_interval_port_alloc()).
 */
void
work_interval_port_notify(mach_msg_header_t *msg)
{
mach_no_senders_notification_t *notification = (void *)msg;
ipc_port_t port = notification->not_header.msgh_remote_port;
struct work_interval *work_interval = NULL;
if (!IP_VALID(port))
panic("work_interval_port_notify(): invalid port");
ip_lock(port);
if (!ip_active(port))
panic("work_interval_port_notify(): inactive port %p", port);
if (ip_kotype(port) != IKOT_WORK_INTERVAL)
panic("work_interval_port_notify(): not the right kobject: %p, %d\n",
port, ip_kotype(port));
/* A stale notification (new send rights made since it was queued)
 * would show a make-send count mismatch or live send rights. */
if (port->ip_mscount != notification->not_count)
panic("work_interval_port_notify(): unexpected make-send count: %p, %d, %d",
port, port->ip_mscount, notification->not_count);
if (port->ip_srights != 0)
panic("work_interval_port_notify(): unexpected send right count: %p, %d",
port, port->ip_srights);
work_interval = (struct work_interval *)port->ip_kobject;
if (work_interval == NULL)
panic("work_interval_port_notify(): missing kobject: %p", port);
/* Detach kobject under the lock, then destroy the port and drop the
 * port's reference on the interval — in that order. */
ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);
work_interval->wi_port = MACH_PORT_NULL;
ip_unlock(port);
ipc_port_dealloc_kernel(port);
wi_release(work_interval);
}
/*
 * Point the current thread at a (possibly NULL) work interval.
 *
 * Consumes the caller's reference on the incoming interval and drops
 * the thread's reference on whatever it was previously associated
 * with. Only valid on the current thread.
 */
static void
thread_set_work_interval(thread_t thread,
struct work_interval *work_interval)
{
	assert(thread == current_thread());

	struct work_interval *previous = thread->th_work_interval;
	thread->th_work_interval = work_interval;

	if (previous) {
		wi_release(previous);
	}
}
/*
 * Thread-teardown hook: drop the thread's work-interval reference,
 * if it holds one. The NULL check keeps this a no-op (and avoids
 * thread_set_work_interval's current-thread assert) for threads that
 * never joined an interval.
 */
void
work_interval_thread_terminate(thread_t thread)
{
	if (thread->th_work_interval == NULL) {
		return;
	}

	thread_set_work_interval(thread, NULL);
}
/*
 * Deliver a work-interval notification from the current thread.
 *
 * Validates that the calling thread is actually associated with the
 * interval id being reported and that the caller is the creating
 * task (matched by task unique id and pid version, so a pid-reuse
 * imposter cannot notify). Fills in the thread's current scheduling
 * urgency and forwards the arguments to the machine layer.
 *
 * Returns KERN_INVALID_ARGUMENT on any mismatch.
 */
kern_return_t
kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_args)
{
assert(thread == current_thread());
assert(kwi_args->work_interval_id != 0);
struct work_interval *work_interval = thread->th_work_interval;
if (work_interval == NULL ||
work_interval->wi_id != kwi_args->work_interval_id) {
return (KERN_INVALID_ARGUMENT);
}
task_t notifying_task = current_task();
if (work_interval->wi_creator_uniqueid != get_task_uniqueid(notifying_task) ||
work_interval->wi_creator_pidversion != get_task_version(notifying_task)) {
return (KERN_INVALID_ARGUMENT);
}
/* thread_get_urgency() must run with interrupts/preemption blocked
 * at splsched — NOTE(review): presumably to sample a consistent
 * scheduler state; confirm against thread_get_urgency()'s contract. */
spl_t s = splsched();
uint64_t urgency_param1, urgency_param2;
kwi_args->urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
splx(s);
machine_work_interval_notify(thread, kwi_args);
return (KERN_SUCCESS);
}
/* Monotonic generator for work interval ids; consumed (fetch-add) by
 * kern_work_interval_create(), so ids handed out start above 1 and
 * are never 0 (0 is treated as "no interval" by callers). */
static _Atomic uint64_t unique_work_interval_id = 1;
/*
 * Create a new work interval on behalf of the current thread.
 *
 * Two ownership modes, selected by WORK_INTERVAL_FLAG_JOINABLE:
 *  - joinable: the interval's initial reference is donated to a new
 *    kobject port, and a send-right name is copied out to userspace
 *    in wica_port; threads join via kern_work_interval_join().
 *  - non-joinable: the initial reference is handed directly to the
 *    calling thread (thread_set_work_interval).
 *
 * Fails with KERN_FAILURE if the thread already has a work interval.
 */
kern_return_t
kern_work_interval_create(thread_t thread,
struct kern_work_interval_create_args *create_params)
{
assert(thread == current_thread());
if (thread->th_work_interval != NULL) {
return (KERN_FAILURE);
}
struct work_interval *work_interval = kalloc(sizeof(*work_interval));
if (work_interval == NULL)
panic("failed to allocate work_interval");
bzero(work_interval, sizeof(*work_interval));
/* fetch-add returns the pre-increment value, so the id used is
 * old_value + 1; ids are unique and non-zero. */
uint64_t old_value = atomic_fetch_add_explicit(&unique_work_interval_id, 1,
memory_order_relaxed);
uint64_t work_interval_id = old_value + 1;
uint32_t create_flags = create_params->wica_create_flags;
task_t creating_task = current_task();
/* wi_ref_count starts at 1: this is the reference donated to either
 * the port (joinable) or the thread (non-joinable) below. */
*work_interval = (struct work_interval) {
.wi_id = work_interval_id,
.wi_ref_count = 1,
.wi_create_flags = create_flags,
.wi_creator_pid = pid_from_task(creating_task),
.wi_creator_uniqueid = get_task_uniqueid(creating_task),
.wi_creator_pidversion = get_task_version(creating_task),
};
if (create_flags & WORK_INTERVAL_FLAG_JOINABLE) {
ipc_port_t port = work_interval_port_alloc(work_interval);
mach_port_name_t name = MACH_PORT_NULL;
/* NOTE(review): on copyout failure the send right is consumed;
 * presumably the resulting no-senders notification tears down the
 * port and drops the interval's reference, so returning here does
 * not leak — confirm against ipc_port_copyout_send() semantics. */
name = ipc_port_copyout_send(port, current_space());
if (!MACH_PORT_VALID(name)) {
return KERN_RESOURCE_SHORTAGE;
}
create_params->wica_port = name;
} else {
/* Hand the initial reference to the thread directly. */
thread_set_work_interval(thread, work_interval);
create_params->wica_port = MACH_PORT_NULL;
}
create_params->wica_id = work_interval_id;
return KERN_SUCCESS;
}
/*
 * Detach the calling thread from the named work interval.
 *
 * Returns KERN_INVALID_ARGUMENT when the id is zero, the thread has
 * no work interval, or the id does not match the thread's current
 * interval; otherwise drops the thread's reference and succeeds.
 */
kern_return_t
kern_work_interval_destroy(thread_t thread,
uint64_t work_interval_id)
{
	struct work_interval *current_wi = thread->th_work_interval;

	if (work_interval_id == 0 ||
	    current_wi == NULL ||
	    current_wi->wi_id != work_interval_id) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_set_work_interval(thread, NULL);
	return KERN_SUCCESS;
}
/*
 * Join the calling thread to the work interval named by a send-right
 * port name, or leave the current one when MACH_PORT_NULL is passed.
 *
 * The +1 reference produced by the port lookup is transferred to the
 * thread by thread_set_work_interval().
 */
kern_return_t
kern_work_interval_join(thread_t thread,
mach_port_name_t port_name)
{
	if (port_name == MACH_PORT_NULL) {
		/* MACH_PORT_NULL means "leave my current work interval". */
		thread_set_work_interval(thread, NULL);
		return KERN_SUCCESS;
	}

	struct work_interval *joined_wi = NULL;
	kern_return_t kr = port_name_to_work_interval(port_name, &joined_wi);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	assert(joined_wi != NULL);

	/* Hands the lookup's reference to the thread. */
	thread_set_work_interval(thread, joined_wi);
	return KERN_SUCCESS;
}