#include <pexpert/arm64/board_config.h>
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <vm/pmap.h>
#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>
#include <arm/pmap.h>
#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>
#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif
#if HAS_IOA
#define MAX_LOCK_GROUPS 2 // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP 1 // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1 // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0 // AMCC lock group index
#define MAX_APERTURES 16 // Maximum number of register apertures
#define MAX_PLANES 16 // Maximum number of planes within each aperture
#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG (1 << 1) // Look for master lock register in the lock group
#define LOCK_TYPE_HAS_LOCK_REG (1 << 0) // Look for lock register in the lock type
extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;
extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;
/*
 * Description of a single lock-related register, as parsed from the
 * device tree: where it lives within a plane, which bits matter, and
 * the value that is expected (or written) there.
 */
typedef struct lock_reg {
	uint32_t reg_offset;            /* offset of the register within the aperture/plane */
	uint32_t reg_mask;              /* mask selecting the meaningful bits */
	uint32_t reg_value;             /* expected/written value under the mask */
} lock_reg_t;
/*
 * One lockable region type (e.g. CTRR region A): the register set that
 * bounds, enables, write-protects and locks it, plus the page size its
 * limit registers are expressed in.
 */
typedef struct lock_type {
	uint32_t   page_size_shift;     /* log2 of the page granule used by the limit registers */
	lock_reg_t lower_limit_reg;     /* lower bound of the protected range */
	lock_reg_t upper_limit_reg;     /* upper bound of the protected range */
	lock_reg_t enable_reg;          /* optional: enables the protection */
	lock_reg_t write_disable_reg;   /* optional: disables writes to the range */
	lock_reg_t lock_reg;            /* optional: locks the configuration until reset */
} lock_type_t;
typedef struct lock_group {
uint32_t aperture_count; uint32_t aperture_size; uint32_t plane_count; uint32_t plane_stride; uint64_t aperture_phys_addr[MAX_APERTURES]; lock_reg_t cache_status_reg; #if HAS_IOA
lock_reg_t master_lock_reg; #endif
lock_type_t ctrr_a; } lock_group_t;
/* Lock group descriptions parsed once from the device tree (see find_lock_group_data). */
SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
/* Set to true after _lock_group has been populated; guards against re-parsing. */
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;
/* Physical bounds of the AMCC-protected RO region, stashed by rorgn_stash_range(). */
static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
/* Physical bounds of the MMU (KTRR/CTRR) protected region. */
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;
/* Kernel VA of each mapped aperture; populated in rorgn_stash_range(), unmapped after lockdown. */
static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];
#if CONFIG_CSR_FROM_DT
/* When set (from SIP configuration in the DT), kernel text protection is disabled. */
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif
/* The per-variant MSR that locks the MMU-side protection until reset. */
#if defined(KERNEL_INTEGRITY_KTRR)
#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
#elif defined(KERNEL_INTEGRITY_CTRR)
#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
#endif
/*
 * Read a 32-bit property from a device-tree node.
 *
 * Returns false when the property is absent; panics if it exists but
 * is not exactly 4 bytes (a malformed device tree is fatal here).
 */
static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	void const *prop_data = NULL;
	unsigned int prop_size = 0;

	if (SecureDTGetProperty(node, name, &prop_data, &prop_size) != kSuccess) {
		return false;
	}
	if (prop_size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", prop_size);
	}
	*dest = *(uint32_t const *)prop_data;
	return true;
}
/*
 * Read a 32-bit property that must exist; panics (rather than
 * returning an error) when it is missing, since the lock-regs
 * description is mandatory for bring-up.
 */
static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t result = 0;

	if (!_dt_get_uint32(node, name, &result)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}
	return result;
}
/*
 * Parse one lock_reg_t from a device-tree node.  The register is
 * described by up to three properties: "<name>-reg-offset",
 * "<name>-reg-mask" and (when with_value) "<name>-reg-value".
 *
 * Returns false when the register is absent and not required.  Once
 * the offset property is found, the mask (and value, if requested)
 * become mandatory and their absence is a panic.
 *
 * BUG FIX: the previous revision contained `®->reg_offset` etc. —
 * the address-of expression `&reg` had been corrupted into the U+00AE
 * "registered trademark" character (an HTML-entity decode of "&reg"),
 * which is not valid C.  Restored to `&reg->...`.
 */
static bool
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		} else {
			return false;
		}
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}
	return true;
}
/*
 * Parse one lock group (e.g. "amcc" or "ioa") from /chosen/lock-regs.
 *
 * Fills in *lock_group with the aperture/plane geometry and the
 * optional group-level registers selected by `options`
 * (LOCK_GROUP_HAS_CACHE_STATUS_REG / LOCK_GROUP_HAS_MASTER_LOCK_REG).
 * All structural problems — missing nodes, out-of-range counts,
 * inconsistent sizes — are fatal panics: the lock-regs description
 * must be trustworthy before lockdown.
 *
 * Returns the group's DT node so the caller can parse its lock types.
 */
static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t* lock_group, const char *group_name, uint32_t options)
{
DTEntry group_node;
/* The group node itself is mandatory. */
if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
}
/* Geometry: number of apertures must fit the fixed-size PA array. */
lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");
if (lock_group->aperture_count > MAX_APERTURES) {
panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
}
lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");
if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
}
lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");
if (lock_group->plane_count > MAX_PLANES) {
panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
}
/* plane-stride is optional; default to 0 (single-plane layout). */
if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
lock_group->plane_stride = 0;
}
if (lock_group->plane_count > 1) {
uint32_t aperture_size;
if (lock_group->plane_stride == 0) {
panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
}
/* Overflow-checked: all planes must fit inside one aperture. */
if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
|| (aperture_size > lock_group->aperture_size)) {
panic("%s: aperture-size (%#x) is insufficent to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
}
}
/* Aperture physical bases: size must match aperture-count exactly. */
uint64_t const *phys_bases = NULL;
unsigned int prop_size;
if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
panic("%s: missing required %s", group_name, "aperture-phys-addr");
}
if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
}
memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);
/* Optional group-level registers, gated by caller-supplied options. */
if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
}
#if HAS_IOA
if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
}
#endif
return group_node;
}
/*
 * Parse one lock type (e.g. "amcc-ctrr-a") beneath a lock group node.
 *
 * The page-size-shift and both limit registers are always mandatory;
 * the lock register is mandatory only when LOCK_TYPE_HAS_LOCK_REG is
 * set in `options`; enable and write-disable are always optional.
 */
static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	const bool lock_reg_required = (options & LOCK_TYPE_HAS_LOCK_REG) != 0;

	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	/* Limit registers carry no "-reg-value" property (with_value = false). */
	_dt_get_lock_reg(type_node, &lock_type->lower_limit_reg, type_name, "lower-limit", true, false);
	_dt_get_lock_reg(type_node, &lock_type->upper_limit_reg, type_name, "upper-limit", true, false);
	_dt_get_lock_reg(type_node, &lock_type->lock_reg, type_name, "lock", lock_reg_required, true);
	_dt_get_lock_reg(type_node, &lock_type->enable_reg, type_name, "enable", false, true);
	_dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name, "write-disable", false, true);
}
/*
 * Return the parsed lock group table, parsing /chosen/lock-regs from
 * the device tree on first call.  Subsequent calls return the cached
 * table (lock_regs_set guards the one-time parse).  A missing
 * lock-regs node is fatal — it indicates an iBoot/EDT too old to
 * describe the lockdown hardware.
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
DTEntry lock_regs_node = NULL;
DTEntry amcc_node = NULL;
/* Already parsed once; return cached data. */
if (lock_regs_set) {
return _lock_group;
}
if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
}
/* AMCC group is always present and provides the CTRR-A lock type. */
amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);
#if HAS_IOA
/* IOA group exists only on platforms with IO apertures to protect. */
DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif
lock_regs_set = true;
return _lock_group;
}
/*
 * Map every lock group aperture, read back the RO-region bounds that
 * iBoot programmed into the limit registers, verify they agree across
 * all apertures and planes, and stash both the AMCC (rorgn_*) and
 * MMU (ctrr_*) protection bounds for use at lockdown time.
 *
 * On DEVELOPMENT/DEBUG (or CSR-from-DT) configurations the whole
 * mechanism can be disabled with -unsafe_kernel_text / SIP.
 */
void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
boolean_t rorgn_disable = FALSE;
#if DEVELOPMENT || DEBUG
PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif
#if CONFIG_CSR_FROM_DT
if (csr_unsafe_kernel_text) {
rorgn_disable = true;
}
#endif
/* Protection disabled: leave all bounds at 0 and never lock down. */
if (rorgn_disable) {
return;
}
#endif
lock_group_t const * const lock_group = find_lock_group_data();
/* Per-(group, aperture, plane) raw limit-register readings. */
uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];
/* Map the aperture; the VA is kept until rorgn_lockdown() unmaps it. */
lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);
if (lock_group_va[lg][aperture] == 0) {
panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
}
/* Read the lower/upper limit registers from every plane. */
for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
uint64_t reg_addr;
reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
}
}
/* iBoot must have programmed a non-empty region. */
assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);
/* Every aperture/plane must agree with reading [0][0][0]. */
for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
|| (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
panic("Inconsistent memory config");
}
}
}
/* Convert page-granule limits to inclusive physical byte addresses. */
uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;
rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
}
assert(segLOWESTRO && gVirtBase && gPhysBase);
/* MMU-side (KTRR/CTRR) bounds are derived from kernel segment layout. */
ctrr_begin = kvtophys(segLOWESTRO);
#if defined(KERNEL_INTEGRITY_KTRR)
/* KTRR: region ends just before __LASTDATA_CONST; sanity-check layout. */
ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;
assert(!segHIGHESTRO);
assert(segSizeLAST == PAGE_SIZE);
assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC);
assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)
/* CTRR: region extends through __LAST (or up to segHIGHESTRO if set). */
if (segHIGHESTRO) {
assert(segLASTB + segSizeLAST <= segHIGHESTRO);
ctrr_end = kvtophys(segHIGHESTRO) - 1;
} else {
ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
}
/* AMCC and MMU protections must cover the identical physical range. */
assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}
#if DEVELOPMENT || DEBUG
/*
 * Debug-only sanity check: before we apply lockdown, verify that
 * nothing (iBoot, earlier boot stages) has already locked any lock
 * group register, write-disabled any plane, or locked the CTRR/KTRR
 * MSR.  A register counts as "locked" when its masked value equals
 * the reg_value the device tree declares for the locked state.
 *
 * (Also removes a stray double semicolon after `write_disabled`.)
 */
static void
assert_all_lock_groups_unlocked(lock_group_t const *lock_groups)
{
	uint64_t reg_addr;
	uint64_t ctrr_lock = 0;
	bool locked = false;
	bool write_disabled = false;

	assert(lock_groups);
	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) {
#if HAS_IOA
			/* A zero mask means the register does not exist on this platform. */
			if (lock_groups[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset;
				locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value);
			}
#endif
			for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) {
				if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset;
					write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value);
				}
				if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
					locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
				}
			}
		}
	}
	/* The MSR-side lock must also still be clear. */
	ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);
	assert(!ctrr_lock);
	assert(!write_disabled && !locked);
}
#endif
/*
 * Engage the memory-controller side of the protection: for every lock
 * group, aperture and plane, write the enable, write-disable and lock
 * registers (each only if the device tree declared it, i.e. mask != 0).
 * Planes are walked from highest index down to 0 via the do/while.
 * The protected range is cleaned to PoC first so no dirty lines for
 * the soon-to-be-RO region remain in the cache hierarchy.
 *
 * NOTE(review): the limit registers are not written here — they were
 * already programmed by iBoot (rorgn_stash_range reads them back).
 */
static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
uint64_t reg_addr;
assert(lock_group);
/* Force-clean the [begin, end] VA range to the point of coherency. */
CleanPoC_DcacheRegion_Force(begin, end - begin + 1);
for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
unsigned int plane = lock_group[lg].plane_count - 1;
do {
/* Optional enable register (mask == 0 means not present). */
if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
}
/* Optional write-disable register. */
if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
}
/* Optional per-plane lock register — written last for this plane. */
if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
}
/* Barrier between planes so the writes take effect in order. */
__builtin_arm_isb(ISB_SY);
} while (plane-- > 0);
#if HAS_IOA
/* IOA: aperture-wide master lock, after all planes are locked. */
if (lock_group[lg].master_lock_reg.reg_mask != 0) {
reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
}
__builtin_arm_isb(ISB_SY);
#endif
}
}
}
/*
 * Engage the MMU side of the protection by programming and locking
 * the KTRR or CTRR system registers with the [begin, end] physical
 * range.  After the lock MSR is written, the configuration cannot be
 * changed until reset.  The statement order (limits, barriers/TLB
 * flush, control, lock) is deliberate — do not reorder.
 */
static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)
__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);
/* Synchronize the MSR writes, then discard stale translations. */
__builtin_arm_isb(ISB_SY);
flush_mmu_tlb();
#elif defined (KERNEL_INTEGRITY_CTRR)
__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);
#if !defined(APPLEVORTEX)
/* Most cores: flush the TLB before enabling the control register. */
__builtin_arm_isb(ISB_SY);
flush_mmu_tlb();
#endif
/* Region A: PXN for EL0, write-protect while the MMU is on; then lock. */
__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);
/* When running at EL2, mirror the configuration into the ACC EL2 copies. */
uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
if (current_el == PSR64_MODE_EL2) {
__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
}
__builtin_arm_isb(ISB_SY);
#if defined(APPLEVORTEX)
/* Vortex: TLB flush must come after the lock sequence instead. */
flush_mmu_tlb();
#endif
#else
#error KERNEL_INTEGRITY config error
#endif
}
#if DEVELOPMENT || DEBUG
/*
 * Debug-only check that the AMCC cache is disabled before lockdown,
 * by comparing the masked cache-status register of every aperture and
 * plane against the expected "disabled" value from the device tree.
 *
 * BUG FIX: the early-return condition was inverted.  With
 * `reg_mask != 0` the function returned exactly when a cache status
 * register WAS present (skipping the check entirely), and when the
 * register was absent (mask == 0) it read an undefined register
 * offset and asserted `(value & 0) == reg_value`.  A zero mask means
 * "no such register on this platform" — that is the case to skip.
 */
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	assert(lock_group);

	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	if (cache_status_reg->reg_mask == 0) {
		return; /* Platform has no cache status register; nothing to check. */
	}

	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
		}
	}
}
#endif
/*
 * Apply the full RO-region lockdown: verify nothing is locked yet
 * (DEBUG), lock every memory-controller lock group over the stashed
 * rorgn range, lock the MMU-side KTRR/CTRR registers over the ctrr
 * range, then unmap the aperture windows.  Honors the same
 * -unsafe_kernel_text / CSR opt-outs as rorgn_stash_range().
 *
 * On CTRR configs, also records this CPU's cluster as locked and
 * wakes any threads waiting on that cluster's lock status.
 */
void
rorgn_lockdown(void)
{
boolean_t ctrr_disable = FALSE;
#if DEVELOPMENT || DEBUG
PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif
#if CONFIG_CSR_FROM_DT
if (csr_unsafe_kernel_text) {
ctrr_disable = true;
}
#endif
if (!ctrr_disable) {
lock_group_t const * const lock_group = find_lock_group_data();
#if DEVELOPMENT || DEBUG
/* Pre-lockdown sanity: nothing may be locked or write-disabled yet. */
assert_all_lock_groups_unlocked(lock_group);
printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);
assert_amcc_cache_disabled(lock_group);
#endif
/* Memory-controller lockdown first (takes kernel VAs of the range)... */
lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));
/* ...then the MMU-side lock (takes physical addresses). */
lock_mmu(ctrr_begin, ctrr_end);
/* Aperture mappings are no longer needed once everything is locked. */
for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
}
}
}
#if defined(KERNEL_INTEGRITY_CTRR)
/* Publish this cluster's locked state and wake waiters, even when disabled. */
cpu_data_t *cdp;
cdp = getCpuDatap();
cdp->cpu_cluster_id = ml_get_cluster_number_local();
assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}
#endif