#include <mach_ldebug.h>
#define LOCK_PRIVATE 1
#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>
#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif
#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
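/*
 * arm64 platform self-tests (xnupost): exercises low-level lock primitives,
 * syscall argument mungers, exception callbacks, PAN, PAC (ROP/JOP), CTRR
 * and SPR locking. Tests log via T_LOG and report via T_ASSERT/T_EXPECT.
 */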
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif
#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
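/*
 * Shared state for the lock tests. lt_counter counts completed critical
 * sections, lt_max_holders tracks the peak number of simultaneous holders
 * (which must stay at 1 for exclusive locks), and the done/target counters
 * let the main thread wait for all worker threads to finish.
 */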
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
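/*
 * Track concurrent holders of the blocking lock under test; the counter
 * updates are serialized by lt_hw_lock so lt_max_holders is exact.
 */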
static void
lt_note_another_blocking_lock_holder()
{
hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
lt_num_holders++;
lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
hw_lock_unlock(&lt_hw_lock);
}
static void
lt_note_blocking_lock_release()
{
hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
lt_num_holders--;
hw_lock_unlock(&lt_hw_lock);
}
static void
lt_spin_a_little_bit()
{
uint32_t i;
for (i = 0; i < 10000; i++) {
lt_spinvolatile++;
}
}
static void
lt_sleep_a_little_bit()
{
delay(100);
}
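/*
 * Grab/hold/release helpers: each acquires a lock (directly or by spinning
 * on the trylock variant), records itself as a holder, holds the lock
 * briefly, bumps lt_counter, and releases.
 */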
static void
lt_grab_mutex()
{
lck_mtx_lock(&lt_mtx);
lt_note_another_blocking_lock_holder();
lt_sleep_a_little_bit();
lt_counter++;
lt_note_blocking_lock_release();
lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_mutex_with_try()
{
while (0 == lck_mtx_try_lock(&lt_mtx)) {
;
}
lt_note_another_blocking_lock_holder();
lt_sleep_a_little_bit();
lt_counter++;
lt_note_blocking_lock_release();
lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_rw_exclusive()
{
lck_rw_lock_exclusive(&lt_rwlock);
lt_note_another_blocking_lock_holder();
lt_sleep_a_little_bit();
lt_counter++;
lt_note_blocking_lock_release();
lck_rw_done(&lt_rwlock);
}
static void
lt_grab_rw_exclusive_with_try()
{
while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
lt_sleep_a_little_bit();
}
lt_note_another_blocking_lock_holder();
lt_sleep_a_little_bit();
lt_counter++;
lt_note_blocking_lock_release();
lck_rw_done(&lt_rwlock);
}
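/*
 * Take the rwlock shared, then attempt a shared-to-exclusive upgrade.
 * If the upgrade fails (another upgrader won the race), the shared hold
 * has already been dropped, so fall back to acquiring exclusive directly.
 */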
static void
lt_upgrade_downgrade_rw()
{
boolean_t upgraded, success;
success = lck_rw_try_lock_shared(&lt_rwlock);
if (!success) {
lck_rw_lock_shared(&lt_rwlock);
}
lt_note_another_blocking_lock_holder();
lt_sleep_a_little_bit();
lt_note_blocking_lock_release();
upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
if (!upgraded) {
success = lck_rw_try_lock_exclusive(&lt_rwlock);
if (!success) {
lck_rw_lock_exclusive(&lt_rwlock);
}
}
lt_upgrade_holders++;
if (lt_upgrade_holders > lt_max_upgrade_holders) {
lt_max_upgrade_holders = lt_upgrade_holders;
}
lt_counter++;
lt_sleep_a_little_bit();
lt_upgrade_holders--;
lck_rw_lock_exclusive_to_shared(&lt_rwlock);
lt_spin_a_little_bit();
lck_rw_done(&lt_rwlock);
}
#if __AMP__
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];
lck_ticket_t lt_ticket_lock;
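/*
 * Ticket-lock stress: every CPU hammers lck_ticket_lock() until the shared
 * counter reaches the limit, tallying per-CPU acquisitions so the caller
 * can detect both lost updates and starvation.
 */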
static void
lt_stress_ticket_lock()
{
int local_counter = 0;
uint cpuid = current_processor()->cpu_id;
kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
lck_ticket_lock(&lt_ticket_lock);
lt_counter++;
local_counter++;
lck_ticket_unlock(&lt_ticket_lock);
while (lt_counter < lt_target_done_threads) {
;
}
kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
while (lt_counter < limit) {
lck_ticket_lock(&lt_ticket_lock);
if (lt_counter < limit) {
lt_counter++;
local_counter++;
}
lck_ticket_unlock(&lt_ticket_lock);
}
lt_stress_local_counters[cpuid] = local_counter;
kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
#endif
static void
lt_grab_hw_lock()
{
hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
lt_counter++;
lt_spin_a_little_bit();
hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_try()
{
while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
;
}
lt_counter++;
lt_spin_a_little_bit();
hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_to()
{
while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
mp_enable_preemption();
}
lt_counter++;
lt_spin_a_little_bit();
hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_spin_lock()
{
lck_spin_lock(&lt_lck_spin_t);
lt_counter++;
lt_spin_a_little_bit();
lck_spin_unlock(&lt_lck_spin_t);
}
static void
lt_grab_spin_lock_with_try()
{
while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
;
}
lt_counter++;
lt_spin_a_little_bit();
lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;
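/* Reset all shared lock-test state between test cases. */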
static void
lt_reset()
{
lt_counter = 0;
lt_max_holders = 0;
lt_num_holders = 0;
lt_max_upgrade_holders = 0;
lt_upgrade_holders = 0;
lt_done_threads = 0;
lt_target_done_threads = 0;
lt_cpu_bind_id = 0;
OSMemoryBarrier();
}
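/*
 * Helper-thread bodies for the trylock tests: spin until the main thread
 * signals via lt_thread_lock_grabbed, then record whether the try/timeout
 * acquisition succeeded in lt_thread_lock_success.
 */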
static void
lt_trylock_hw_lock_with_to()
{
OSMemoryBarrier();
while (!lt_thread_lock_grabbed) {
lt_sleep_a_little_bit();
OSMemoryBarrier();
}
lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
OSMemoryBarrier();
mp_enable_preemption();
}
static void
lt_trylock_spin_try_lock()
{
OSMemoryBarrier();
while (!lt_thread_lock_grabbed) {
lt_sleep_a_little_bit();
OSMemoryBarrier();
}
lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
OSMemoryBarrier();
}
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
void (*func)(void) = (void (*)(void))arg;
func();
OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_trylock_thread(thread_continue_t func)
{
thread_t thread;
kern_return_t kr;
kr = kernel_thread_start(lt_trylock_thread, func, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
}
static void
lt_wait_for_lock_test_threads()
{
OSMemoryBarrier();
while (lt_done_threads < lt_target_done_threads) {
lt_sleep_a_little_bit();
OSMemoryBarrier();
}
OSMemoryBarrier();
}
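/*
 * Verify try-lock semantics for mutexes, rwlocks, hw locks and spin locks:
 * a trylock on an unheld lock succeeds; a trylock or timed lock on a held
 * lock fails. On uniprocessor configurations, preemption is re-enabled
 * around the waits so the helper thread can run.
 */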
static kern_return_t
lt_test_trylocks()
{
boolean_t success;
extern unsigned int real_ncpus;
success = lck_mtx_try_lock(&lt_mtx);
T_ASSERT_NOTNULL(success, "First mtx try lock");
success = lck_mtx_try_lock(&lt_mtx);
T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
lck_mtx_unlock(&lt_mtx);
lck_mtx_lock(&lt_mtx);
success = lck_mtx_try_lock(&lt_mtx);
T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
lck_mtx_unlock(&lt_mtx);
success = lck_rw_try_lock_shared(&lt_rwlock);
T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
success = lck_rw_try_lock_shared(&lt_rwlock);
T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
success = lck_rw_try_lock_exclusive(&lt_rwlock);
T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
lck_rw_done(&lt_rwlock);
lck_rw_done(&lt_rwlock);
lck_rw_lock_shared(&lt_rwlock);
success = lck_rw_try_lock_shared(&lt_rwlock);
T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
success = lck_rw_try_lock_exclusive(&lt_rwlock);
T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
lck_rw_done(&lt_rwlock);
lck_rw_done(&lt_rwlock);
success = lck_rw_try_lock_exclusive(&lt_rwlock);
T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
success = lck_rw_try_lock_shared(&lt_rwlock);
T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
success = lck_rw_try_lock_exclusive(&lt_rwlock);
T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
lck_rw_done(&lt_rwlock);
lck_rw_lock_exclusive(&lt_rwlock);
success = lck_rw_try_lock_shared(&lt_rwlock);
T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
success = lck_rw_try_lock_exclusive(&lt_rwlock);
T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
lck_rw_done(&lt_rwlock);
success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
T_ASSERT_NOTNULL(success, "First spin lock attempt should succeed");
success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
hw_lock_unlock(&lt_hw_lock);
hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
hw_lock_unlock(&lt_hw_lock);
lt_reset();
lt_thread_lock_grabbed = false;
lt_thread_lock_success = true;
lt_target_done_threads = 1;
OSMemoryBarrier();
lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
if (real_ncpus == 1) {
mp_enable_preemption();
}
OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
lt_wait_for_lock_test_threads();
T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
if (real_ncpus == 1) {
mp_disable_preemption();
}
hw_lock_unlock(&lt_hw_lock);
lt_reset();
lt_thread_lock_grabbed = false;
lt_thread_lock_success = true;
lt_target_done_threads = 1;
OSMemoryBarrier();
lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
if (real_ncpus == 1) {
mp_enable_preemption();
}
OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
lt_wait_for_lock_test_threads();
T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
if (real_ncpus == 1) {
mp_disable_preemption();
}
hw_lock_unlock(&lt_hw_lock);
success = lck_spin_try_lock(&lt_lck_spin_t);
T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
success = lck_spin_try_lock(&lt_lck_spin_t);
T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
lck_spin_unlock(&lt_lck_spin_t);
lt_reset();
lt_thread_lock_grabbed = false;
lt_thread_lock_success = true;
lt_target_done_threads = 1;
lt_start_trylock_thread(lt_trylock_spin_try_lock);
lck_spin_lock(&lt_lck_spin_t);
if (real_ncpus == 1) {
mp_enable_preemption();
}
OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
lt_wait_for_lock_test_threads();
T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
if (real_ncpus == 1) {
mp_disable_preemption();
}
lck_spin_unlock(&lt_lck_spin_t);
return KERN_SUCCESS;
}
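/* Worker thread: run the given lock routine LOCK_TEST_ITERATIONS times. */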
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
void (*func)(void) = (void (*)(void))arg;
uint32_t i;
for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
func();
}
OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread(thread_continue_t func)
{
thread_t thread;
kern_return_t kr;
kr = kernel_thread_start(lt_thread, func, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
}
#if __AMP__
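/*
 * AMP thread starters: lt_bound_thread binds each worker to a distinct CPU,
 * while lt_e_thread/lt_p_thread restrict workers to the efficiency or
 * performance cluster before running the test body.
 */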
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
void (*func)(void) = (void (*)(void))arg;
int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
processor_t processor = processor_list;
while ((processor != NULL) && (processor->cpu_id != cpuid)) {
processor = processor->processor_list;
}
if (processor != NULL) {
thread_bind(processor);
}
thread_block(THREAD_CONTINUE_NULL);
func();
OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
void (*func)(void) = (void (*)(void))arg;
thread_t thread = current_thread();
spl_t s = splsched();
thread_lock(thread);
thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
thread_unlock(thread);
splx(s);
thread_block(THREAD_CONTINUE_NULL);
func();
OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
void (*func)(void) = (void (*)(void))arg;
thread_t thread = current_thread();
spl_t s = splsched();
thread_lock(thread);
thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
thread_unlock(thread);
splx(s);
thread_block(THREAD_CONTINUE_NULL);
func();
OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread_e(thread_continue_t func)
{
thread_t thread;
kern_return_t kr;
kr = kernel_thread_start(lt_e_thread, func, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
}
static void
lt_start_lock_thread_p(thread_continue_t func)
{
thread_t thread;
kern_return_t kr;
kr = kernel_thread_start(lt_p_thread, func, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
}
static void
lt_start_lock_thread_bound(thread_continue_t func)
{
thread_t thread;
kern_return_t kr;
kr = kernel_thread_start(lt_bound_thread, func, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
}
#endif
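/*
 * Top-level lock test driver: initialize the locks under test, then run
 * each scenario and check lt_counter and the holder high-water marks.
 */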
static kern_return_t
lt_test_locks()
{
kern_return_t kr = KERN_SUCCESS;
lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
hw_lock_init(&lt_hw_lock);
T_LOG("Testing locks.");
lt_reset();
T_LOG("Running try lock test.");
kr = lt_test_trylocks();
T_EXPECT_NULL(kr, "try lock test failed.");
T_LOG("Running uncontended mutex test.");
lt_reset();
lt_target_done_threads = 1;
lt_start_lock_thread(lt_grab_mutex);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running contended mutex test.");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_mutex);
lt_start_lock_thread(lt_grab_mutex);
lt_start_lock_thread(lt_grab_mutex);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running contended mutex trylock test.");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_mutex_with_try);
lt_start_lock_thread(lt_grab_mutex_with_try);
lt_start_lock_thread(lt_grab_mutex_with_try);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running uncontended exclusive rwlock test.");
lt_reset();
lt_target_done_threads = 1;
lt_start_lock_thread(lt_grab_rw_exclusive);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running contended exclusive rwlock test.");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_rw_exclusive);
lt_start_lock_thread(lt_grab_rw_exclusive);
lt_start_lock_thread(lt_grab_rw_exclusive);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running test with threads upgrading and downgrading.");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_upgrade_downgrade_rw);
lt_start_lock_thread(lt_upgrade_downgrade_rw);
lt_start_lock_thread(lt_upgrade_downgrade_rw);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
lt_reset();
lt_target_done_threads = 1;
lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running test with threads doing exclusive rwlock trylocks.");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
T_LOG("Running test with hw_lock_lock()");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_hw_lock);
lt_start_lock_thread(lt_grab_hw_lock);
lt_start_lock_thread(lt_grab_hw_lock);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
#if __AMP__
T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
extern unsigned int real_ncpus;
lck_ticket_init(&lt_ticket_lock);
lt_reset();
lt_target_done_threads = real_ncpus;
for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
lt_start_lock_thread_bound(lt_stress_ticket_lock);
}
lt_wait_for_lock_test_threads();
bool starvation = false;
uint total_local_count = 0;
for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
total_local_count += lt_stress_local_counters[processor->cpu_id];
}
if (total_local_count != lt_counter) {
T_FAIL("Lock failure\n");
} else if (starvation) {
T_FAIL("Lock starvation found\n");
} else {
T_PASS("Ticket locks stress test with lck_ticket_lock()");
}
T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
lt_reset();
lt_target_done_threads = real_ncpus;
for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
processor_set_t pset = processor->processor_set;
if (pset->pset_cluster_type == PSET_AMP_P) {
lt_start_lock_thread_p(lt_stress_ticket_lock);
} else if (pset->pset_cluster_type == PSET_AMP_E) {
lt_start_lock_thread_e(lt_stress_ticket_lock);
} else {
lt_start_lock_thread(lt_stress_ticket_lock);
}
}
lt_wait_for_lock_test_threads();
#endif
T_LOG("Running test with hw_lock_try()");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_hw_lock_with_try);
lt_start_lock_thread(lt_grab_hw_lock_with_try);
lt_start_lock_thread(lt_grab_hw_lock_with_try);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_LOG("Running test with hw_lock_to()");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_hw_lock_with_to);
lt_start_lock_thread(lt_grab_hw_lock_with_to);
lt_start_lock_thread(lt_grab_hw_lock_with_to);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_LOG("Running test with lck_spin_lock()");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_spin_lock);
lt_start_lock_thread(lt_grab_spin_lock);
lt_start_lock_thread(lt_grab_spin_lock);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
T_LOG("Running test with lck_spin_try_lock()");
lt_reset();
lt_target_done_threads = 3;
lt_start_lock_thread(lt_grab_spin_lock_with_try);
lt_start_lock_thread(lt_grab_spin_lock_with_try);
lt_start_lock_thread(lt_grab_spin_lock_with_try);
lt_wait_for_lock_test_threads();
T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
return KERN_SUCCESS;
}
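/*
 * Munger tests. The munge_* routines rewrite a syscall argument buffer in
 * place, expanding packed 32-bit user words into 64-bit kernel arguments:
 * 'w' is a word zero-extended to 64 bits, 's' a word sign-extended, and
 * 'l' a 64-bit long occupying two input words. Each table entry gives the
 * input word count, output argument count, and expected output pattern.
 */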
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL)
#define MT_S_VAL (0xfffffffffeedbeefULL)
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32))
typedef void (*sy_munge_t)(void*);
#define MT_FUNC(x) #x, x
struct munger_test {
const char *mt_name;
sy_munge_t mt_func;
uint32_t mt_in_words;
uint32_t mt_nout;
uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};
#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
uint32_t i;
for (i = 0; i < in_words; i++) {
data[i] = MT_INITIAL_VALUE;
}
if (in_words * sizeof(uint32_t) < total_size) {
bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
}
}
static void
mt_test_mungers()
{
uint64_t data[MT_MAX_ARGS];
uint32_t i, j;
for (i = 0; i < MT_TEST_COUNT; i++) {
struct munger_test *test = &munger_tests[i];
int pass = 1;
T_LOG("Testing %s", test->mt_name);
mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
test->mt_func(data);
for (j = 0; j < test->mt_nout; j++) {
if (data[j] != test->mt_expected[j]) {
T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
pass = 0;
}
}
if (pass) {
T_PASS(test->mt_name);
}
}
}
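/*
 * Exception callback test: the registered callback records the faulting
 * address in its refcon context and returns a class-specific action, so the
 * test can verify both dispatch and the action returned.
 */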
static ex_cb_action_t
excb_test_action(
ex_cb_class_t cb_class,
void *refcon,
const ex_cb_state_t *state
)
{
ex_cb_state_t *context = (ex_cb_state_t *)refcon;
if ((NULL == refcon) || (NULL == state)) {
return EXCB_ACTION_TEST_FAIL;
}
context->far = state->far;
switch (cb_class) {
case EXCB_CLASS_TEST1:
return EXCB_ACTION_RERUN;
case EXCB_CLASS_TEST2:
return EXCB_ACTION_NONE;
default:
return EXCB_ACTION_TEST_FAIL;
}
}
kern_return_t
ex_cb_test()
{
const vm_offset_t far1 = 0xdead0001;
const vm_offset_t far2 = 0xdead0002;
kern_return_t kr;
ex_cb_state_t test_context_1 = {0xdeadbeef};
ex_cb_state_t test_context_2 = {0xdeadbeef};
ex_cb_action_t action;
T_LOG("Testing Exception Callback.");
T_LOG("Running registration test.");
kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");
kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");
T_LOG("Running invocation test.");
action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
T_ASSERT(far1 == test_context_1.far, NULL);
action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
T_ASSERT(EXCB_ACTION_NONE == action, NULL);
T_ASSERT(far2 == test_context_2.far, NULL);
action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
T_ASSERT(EXCB_ACTION_NONE == action, NULL);
return KERN_SUCCESS;
}
#if defined(HAS_APPLE_PAC)
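/*
 * ROP/JOP test: checks that the PAC configuration registers match the
 * expected ROP/JOP enablement, that the kernel keys are initialized, and
 * that signing a kernel pointer round-trips through authentication while a
 * corrupted signature authenticates to an error-tagged pointer (the AuthIB
 * failure code in the upper bits).
 */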
kern_return_t
arm64_ropjop_test()
{
T_LOG("Testing ROP/JOP");
boolean_t config_rop_enabled = TRUE;
boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP);
uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
#if __APSTS_SUPPORTED__
uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL);
#else
T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL);
#endif
T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL);
T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL);
bool status_jop_enabled, status_rop_enabled;
#if __APSTS_SUPPORTED__
status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
#elif __APCFG_SUPPORTED__
uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
#else
uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
#endif
T_ASSERT(config_rop_enabled == status_rop_enabled, NULL);
T_ASSERT(config_jop_enabled == status_jop_enabled, NULL);
if (config_jop_enabled) {
uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
}
if (config_rop_enabled) {
uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
uint64_t kva_corrupted = kva_signed ^ 1;
kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
uint64_t auth_fail_mask = 3ULL << 61;
uint64_t authib_fail = 2ULL << 61;
T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
}
return KERN_SUCCESS;
}
#endif
#if __ARM_PAN_AVAILABLE__
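/*
 * PAN (Privileged Access Never) tests. pan_test_addr/pan_ro_addr and the
 * fault bookkeeping globals are shared with the data-abort handler, which
 * is expected to record the exception level and faulting value and let the
 * test resume past the intentional faults.
 */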
struct pan_test_thread_args {
volatile bool join;
};
static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
struct pan_test_thread_args *args = arg;
for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
thread_bind(p);
thread_block(THREAD_CONTINUE_NULL);
kprintf("Running PAN test on cpu %d\n", p->cpu_id);
arm64_pan_test();
}
thread_bind(PROCESSOR_NULL);
thread_block(THREAD_CONTINUE_NULL);
while (!args->join) {
;
}
thread_wakeup(args);
}
kern_return_t
arm64_late_pan_test()
{
thread_t thread;
kern_return_t kr;
struct pan_test_thread_args args;
args.join = false;
kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
assert(kr == KERN_SUCCESS);
thread_deallocate(thread);
assert_wait(&args, THREAD_UNINT);
args.join = true;
thread_block(THREAD_CONTINUE_NULL);
return KERN_SUCCESS;
}
kern_return_t
arm64_pan_test()
{
vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;
T_LOG("Testing PAN.");
T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
pan_exception_level = 0;
pan_fault_value = 0xDE;
pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
_COMM_PAGE_START_ADDRESS;
T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);
T_ASSERT(pan_exception_level == 2, NULL);
T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
pan_exception_level = 0;
pan_fault_value = 0xAD;
pan_ro_addr = (vm_offset_t) &pan_ro_value;
*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
T_ASSERT(pan_exception_level == 2, NULL);
T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
pan_test_addr = 0;
pan_ro_addr = 0;
__builtin_arm_wsr("pan", 1);
return KERN_SUCCESS;
}
#endif
kern_return_t
arm64_lock_test()
{
return lt_test_locks();
}
kern_return_t
arm64_munger_test()
{
mt_test_mungers();
return 0;
}
#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
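/*
 * CTRR test: verifies the protected kernel text region resists writes even
 * through fresh pmap mappings. ctrr_ro_test lives inside the CTRR region;
 * ctrr_nx_test holds an arm64 RET instruction (0xd65f03c0) outside it and
 * is used to check that execution outside the region faults despite an RX
 * mapping.
 */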
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0;
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;
kern_return_t
ctrr_test(void)
{
processor_t p;
boolean_t ctrr_disable = FALSE;
PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
if (ctrr_disable) {
T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
return KERN_SUCCESS;
}
T_LOG("Running CTRR test.");
for (p = processor_list; p != NULL; p = p->processor_list) {
thread_bind(p);
thread_block(THREAD_CONTINUE_NULL);
T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
ctrr_test_cpu();
}
thread_bind(PROCESSOR_NULL);
thread_block(THREAD_CONTINUE_NULL);
return KERN_SUCCESS;
}
kern_return_t
ctrr_test_cpu(void)
{
ppnum_t ro_pn, nx_pn;
uint64_t *ctrr_ro_test_ptr;
void (*ctrr_nx_test_ptr)(void);
kern_return_t kr;
uint64_t prot = 0;
extern uint64_t rorgn_begin, rorgn_end;
extern vm_offset_t virtual_space_start;
vm_offset_t rorgn_begin_va = phystokv(rorgn_begin);
vm_offset_t rorgn_end_va = phystokv(rorgn_end) + PAGE_SIZE;
vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
(void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
ctrr_ro_test_ptr = (void *)ctrr_test_va;
T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
*ctrr_ro_test_ptr = 1;
T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
ctrr_test_va = 0;
ctrr_exception_esr = 0;
pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
ctrr_nx_test_ptr = (void *)ctrr_test_va;
T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
#if __has_feature(ptrauth_calls)
ctrr_nx_test_ptr = ptrauth_sign_unauthenticated(ctrr_nx_test_ptr, ptrauth_key_function_pointer, 0);
#endif
ctrr_nx_test_ptr();
T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
ctrr_test_va = 0;
ctrr_exception_esr = 0;
pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
return KERN_SUCCESS;
}
#endif
#if HAS_TWO_STAGE_SPR_LOCK
#define STR1(x) #x
#define STR(x) STR1(x)
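/*
 * SPR lock test: on each CPU, arm64_msr_lock_test attempts an MSR write to
 * a locked implementation-defined register (HID8); the write should trap
 * (with the ESR recorded in spr_lock_exception_esr by the exception
 * handler) and leave the register value unchanged.
 */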
volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;
kern_return_t
arm64_spr_lock_test()
{
processor_t p;
for (p = processor_list; p != NULL; p = p->processor_list) {
thread_bind(p);
thread_block(THREAD_CONTINUE_NULL);
T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);
uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
spr_lock_exception_esr = 0;
arm64_msr_lock_test(~orig_value);
T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");
uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
T_EXPECT(orig_value == new_value, "MSR write did not succeed");
spr_lock_test_addr = 0;
}
thread_bind(PROCESSOR_NULL);
thread_block(THREAD_CONTINUE_NULL);
T_PASS("Done running SPR lock tests");
return KERN_SUCCESS;
}
#endif