#if CONFIG_PROTECT
#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/uio_internal.h>
#include <sys/ubc_internal.h>
#include <sys/vnode_if.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <libkern/crypto/sha1.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include "hfs.h"
#include "hfs_cnode.h"
#include "hfs_fsctl.h"
#include "hfs_cprotect.h"
/* Byte-offset pointer arithmetic helper: (type)((char *)base + offset). */
#define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset))
/* Key-store callbacks registered by the key manager via cp_register_wraps(). */
static struct cp_wrap_func g_cp_wrap_func = {};
/* Set once cp_register_wraps() has latched the callbacks above. */
static int are_wraps_initialized = false;
extern int (**hfs_vnodeop_p) (void *);
/* Forward declarations for file-local helpers defined below. */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vfs_callback(mount_t, void *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static void cp_init_access(cp_cred_t access, struct cnode *cp);
/* Panic on violated invariants in development/debug builds; no-op otherwise. */
#if DEVELOPMENT || DEBUG
#define CP_ASSERT(x) \
if ((x) == 0) { \
panic("Content Protection: failed assertion in %s", __FUNCTION__); \
}
#else
#define CP_ASSERT(x)
#endif
/*
 * Return the allocation size of a struct cpx able to hold a cached key of
 * key_size bytes.  DEBUG builds reserve 4 extra trailing bytes for the
 * cpx_magic2 guard word written by cpx_init().
 *
 * Fix: the original had "size += 4; #endif" on one line — a preprocessor
 * directive must begin its own line, so that did not compile.
 */
size_t cpx_size(size_t key_size)
{
	size_t size = sizeof(struct cpx) + key_size;

#if DEBUG
	size += 4;		/* room for the trailing magic2 guard word */
#endif

	return size;
}
/* Total allocated size of an existing cpx, derived from its key capacity. */
static size_t cpx_sizex(const struct cpx *cpx)
{
	const size_t capacity = cpx->cpx_max_key_len;

	return cpx_size(capacity);
}
/*
 * Allocate and initialize a cpx capable of caching a key_len-byte key.
 * M_WAITOK means the allocation blocks rather than failing.
 */
cpx_t cpx_alloc(size_t key_len)
{
	cpx_t new_cpx = NULL;

	MALLOC(new_cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
	cpx_init(new_cpx, key_len);

	return new_cpx;
}
/*
 * Guard words bracketing every cpx in DEBUG builds ("cpx{" ... "}cpx" in
 * little-endian ASCII).  Fix: the original packed both declarations and the
 * #endif onto one line — a preprocessor directive must begin its own line.
 */
#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063;	/* "cpx{" */
static const uint32_t cpx_magic2 = 0x7870637d;	/* "}cpx" */
#endif
/*
 * Free a cpx allocated via cpx_alloc(), scrubbing the cached key first so
 * key material does not linger in freed memory.  DEBUG builds verify the
 * leading and trailing guard words before release.
 */
void cpx_free(cpx_t cpx)
{
#if DEBUG
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif
	/* NOTE(review): plain bzero of secrets; kernel build presumably does
	 * not optimize this away — confirm against the platform's policy. */
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	FREE(cpx, M_TEMP);
}
/*
 * Initialize a freshly allocated cpx: clear flags and key length, record
 * the key capacity, and (DEBUG) plant the leading/trailing guard words.
 */
void cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	cpx->cpx_max_key_len = key_len;
}
/* True when the cpx holds a SEP-wrapped key rather than a raw one. */
bool cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
	return (cpx->cpx_flags & CPX_SEP_WRAPPEDKEY) != 0;
}
/* Set or clear the SEP-wrapped-key flag. */
void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
	if (!v) {
		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	} else {
		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	}
}
/* True when IVs are derived from the file offset. */
bool cpx_use_offset_for_iv(const struct cpx *cpx)
{
	return (cpx->cpx_flags & CPX_USE_OFFSET_FOR_IV) != 0;
}
/* Set or clear the offset-derived-IV flag. */
void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
	if (!v) {
		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	} else {
		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	}
}
/* Capacity of the cached-key buffer, in bytes. */
uint16_t cpx_max_key_len(const struct cpx *cpx)
{
	const uint16_t capacity = cpx->cpx_max_key_len;

	return capacity;
}
/* Length of the currently cached key, in bytes (0 when no key). */
uint16_t cpx_key_len(const struct cpx *cpx)
{
	const uint16_t len = cpx->cpx_key_len;

	return len;
}
/*
 * Record the cached key's length.  If the IV AES context was derived from
 * the (old) key via the HFS SHA1 path, invalidate it so cpx_iv_aes_ctx()
 * re-derives it from the new key.
 */
void cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
	cpx->cpx_key_len = key_len;
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_HFS)) {
		/* IV context depended on the old key contents — drop it. */
		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_HFS);
	}
}
/* True when a key is currently cached. */
bool cpx_has_key(const struct cpx *cpx)
{
	return cpx->cpx_key_len != 0;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
/* Mutable pointer to the cached-key buffer (const is cast away by design). */
void *cpx_key(const struct cpx *cpx)
{
	return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop
/*
 * Install iv_key as the AES-128 IV-generation key and mark the context
 * initialized; also forces offset-derived IVs.  Clears the HFS-derived
 * flag — the caller re-sets it when the key came from the SHA1 path.
 */
static void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
	aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
	CLR(cpx->cpx_flags, CPX_IV_AES_CTX_HFS);
}
/*
 * Return the AES context used for IV generation, lazily deriving it on
 * first use: the IV key is the SHA1 digest of the cached class key
 * (legacy HFS scheme, hence the CPX_IV_AES_CTX_HFS marker).
 */
aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		return &cpx->cpx_iv_aes_ctx;

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH];

	/* Derive the IV key from the cached key contents. */
	SHA1Init(&sha1ctxt);
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	cpx_set_aes_iv_key(cpx, digest);
	/* Remember the derivation so cpx_set_key_len() can invalidate it. */
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_HFS);

	return &cpx->cpx_iv_aes_ctx;
}
/* Scrub all key material and reset the cpx to its empty state. */
static void cpx_flush(cpx_t cpx)
{
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx));
	cpx->cpx_key_len = 0;
	cpx->cpx_flags = 0;
}
/* True when dst's buffer is large enough to receive src's cached key. */
static bool cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
	return dst->cpx_max_key_len >= src->cpx_key_len;
}
/*
 * Copy src's cached key, flags, and (if initialized) IV AES context into
 * dst.  Caller must ensure dst has capacity (see cpx_can_copy()).
 */
void cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	/* Only copy the IV context when src actually had one set up. */
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx;
}
/*
 * Initialize a key pair: record the persistent (wrapped) key capacity and
 * set up the embedded cpx for the cached (unwrapped) key.  Offset-derived
 * IVs are the default for per-file keys.
 */
void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len,
    uint16_t max_cached_key_len)
{
	cpkp->cpkp_max_pers_key_len = max_pers_key_len;
	cpkp->cpkp_pers_key_len = 0;
	cpx_init(&cpkp->cpkp_cpx, max_cached_key_len);
	cpx_set_use_offset_for_iv(&cpkp->cpkp_cpx, true);
}
/* Capacity of the persistent (wrapped) key buffer, in bytes. */
uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp)
{
	const uint16_t capacity = cpkp->cpkp_max_pers_key_len;

	return capacity;
}
/* Length of the stored persistent (wrapped) key (0 when none). */
uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp)
{
	const uint16_t len = cpkp->cpkp_pers_key_len;

	return len;
}
/* True when a persistent (wrapped) key is present. */
static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len != 0;
}
/*
 * Pointer to the persistent (wrapped) key bytes, which are stored
 * immediately after the embedded cpx's variable-length allocation.
 */
static void *cpkp_pers_key(const cp_key_pair_t *cpkp)
{
	return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(&cpkp->cpkp_cpx));
}
/* Record the persistent key's length; panics if it exceeds capacity. */
static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t len)
{
	if (len > cpkp->cpkp_max_pers_key_len) {
		panic("hfs_cprotect: key too big!");
	}
	cpkp->cpkp_pers_key_len = len;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
/* Mutable handle to the embedded cpx (const is cast away by design). */
cpx_t cpkp_cpx(const cp_key_pair_t *cpkp)
{
	return (cpx_t)&cpkp->cpkp_cpx;
}
#pragma clang diagnostic pop
/*
 * Allocation size for a key pair holding a pers_key_len-byte wrapped key
 * and a cpx for a cached_key_len-byte unwrapped key.
 */
size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len)
{
	const size_t header = sizeof(cp_key_pair_t) - sizeof(struct cpx);

	return header + pers_key_len + cpx_size(cached_key_len);
}
size_t cpkp_sizex(const cp_key_pair_t *cpkp)
{
return cpkp_size(cpkp->cpkp_max_pers_key_len, cpkp->cpkp_cpx.cpx_max_key_len);
}
/*
 * Scrub both halves of a key pair: the cached key via cpx_flush(), then
 * the persistent key bytes stored after the cpx.
 */
void cpkp_flush(cp_key_pair_t *cpkp)
{
	cpx_flush(&cpkp->cpkp_cpx);
	cpkp->cpkp_pers_key_len = 0;
	bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len);
}
bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst)
{
return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len
&& cpx_can_copy(&src->cpkp_cpx, &dst->cpkp_cpx));
}
/*
 * Copy both keys from src to dst.  Caller must ensure capacity (see
 * cpkp_can_copy()); cpkp_set_pers_key_len() panics on overflow anyway.
 */
void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst)
{
	const uint16_t key_len = cpkp_pers_key_len(src);
	cpkp_set_pers_key_len(dst, key_len);
	memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len);
	cpx_copy(&src->cpkp_cpx, &dst->cpkp_cpx);
}
/* Only CP xattr versions 4 and 5 are understood by this code. */
bool cp_is_supported_version(uint16_t vers)
{
	switch (vers) {
	case CP_VERS_4:
	case CP_VERS_5:
		return true;
	default:
		return false;
	}
}
/*
 * Fill in the I/O crypto parameters for a read/write against this entry:
 * no length limit, no physical offset remapping, and the entry's cached
 * key cpx for the cipher.
 */
void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr,
    __unused off_rsrc_t off_rsrc,
    __unused int direction, cp_io_params_t *io_params)
{
	io_params->max_len = INT64_MAX;
	io_params->phys_offset = -1;
	io_params->cpx = cpkp_cpx(&cpr->cp_keys);
}
/* Discard the entry's cached (unwrapped) key material. */
static void cp_flush_cached_keys(cprotect_t cpr)
{
	cpx_t cpx = cpkp_cpx(&cpr->cp_keys);

	cpx_flush(cpx);
}
/*
 * Does this entry still need key material generated?  Class F files have
 * no persistent key, so only the cached key matters; every other class
 * requires a persistent (wrapped) key.
 */
static bool cp_needs_pers_key(cprotect_t cpr)
{
	const bool is_class_f = (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F);

	if (is_class_f) {
		return !cpx_has_key(cpkp_cpx(&cpr->cp_keys));
	}
	return !cpkp_has_pers_key(&cpr->cp_keys);
}
/*
 * Propagate a key-store lock-state change (e.g. device lock/unlock) to
 * every mounted volume via cp_lock_vfs_callback().  Returns -1 for an
 * out-of-range action, otherwise the vfs_iterate() result.
 */
int
cp_key_store_action(int action)
{
	if (action < 0 || action > CP_MAX_STATE) {
		return -1;
	}
	/* The action is smuggled through the opaque callback argument. */
	return vfs_iterate(0, cp_lock_vfs_callback, (void*)(uintptr_t)action);
}
/*
 * Latch the key-store's wrap/unwrap/rewrap/invalidate/backup callbacks.
 * Until this runs, cp_entry_init() refuses to operate (ENXIO).
 */
int
cp_register_wraps(cp_wrap_func_t key_store_func)
{
	g_cp_wrap_func.backup_key = key_store_func->backup_key;
	g_cp_wrap_func.invalidater = key_store_func->invalidater;
	g_cp_wrap_func.rewrapper = key_store_func->rewrapper;
	g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
	g_cp_wrap_func.new_key = key_store_func->new_key;

	are_wraps_initialized = true;
	return 0;
}
/* First key revision assigned to a brand-new entry. */
static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp)
{
	const cp_key_revision_t first = 1;

	return first;
}
/*
 * Produce the next key revision: bump the high byte and mix in timer
 * entropy in the low byte, avoiding 0 (which means "no revision").
 */
cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev)
{
	rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff);
	if (!rev)
		rev = 1;
	return rev;
}
/*
 * Initialize content-protection state for a cnode.  Reads the CP xattr;
 * if absent (ENOATTR), mints a new entry in the default class (Class D
 * for files, DIR_NONE for directories) and persists it.  On success
 * cp->c_cpentry is set and back-linked; on failure it is left NULL and
 * any partially built entry is destroyed.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/* Unprotected volumes carry no per-cnode CP state. */
	if (!cp_fs_protected (mp)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	/* Only regular files and directories are protected. */
	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	/* Key operations are impossible until the key store registers. */
	if (are_wraps_initialized == false) {
		printf("hfs: cp_update_entry: wrap functions not yet set\n");
		return ENXIO;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version! ");
	}

	CP_ASSERT (cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == ENOATTR) {
		/* No xattr on disk yet: create a fresh default-class entry. */
		cp_key_class_t target_class = PROTECTION_CLASS_D;
		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}
		cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);
		error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
		    key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry);
		if (error == 0) {
			/* cp_new may have negotiated a different class. */
			entry->cp_pclass = target_class;
			entry->cp_key_os_version = cp_os_version();
			entry->cp_key_revision = key_revision;
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	if (error) {
		goto out;
	}

	cp->c_cpentry = entry;

out:
	if (error == 0) {
		/* Back-link so key flush/replace can find the owning cnode. */
		entry->cp_backing_cnode = cp;
	}
	else {
		if (entry) {
			cp_entry_destroy(hfsmp, entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}
/*
 * Build a skeleton cprotect entry (no keys yet, CP_NO_XATTR) for a file
 * about to be created, choosing its protection class:
 *   - a valid supplied class wins (except Class F for directories: EINVAL);
 *   - otherwise the parent directory's class is inherited when sensible;
 *   - otherwise the volume default applies.
 * Returns 0 with *tmpentry set, or EINVAL/ENOMEM with *tmpentry NULL.
 */
int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp,
    cp_key_class_t suppliedclass, mode_t cmode,
    struct cprotect **tmpentry)
{
	int isdir = 0;
	struct cprotect *entry = NULL;
	uint32_t target_class = hfsmp->default_cp_class;
	suppliedclass = CP_CLASS(suppliedclass);

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("CP: major vers not set in mount!");
	}

	if (S_ISDIR (cmode)) {
		isdir = 1;
	}

	/* Prefer an explicitly supplied, valid class. */
	if (cp_is_valid_class (isdir, suppliedclass)) {
		target_class = suppliedclass;
		if (isdir) {
			/* Directories cannot be Class F. */
			if (target_class == PROTECTION_CLASS_F) {
				*tmpentry = NULL;
				return EINVAL;
			}
		}
	}
	else {
		/* Otherwise inherit from the parent directory when possible. */
		if ((dcp) && (dcp->c_cpentry)) {
			uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
			/* Directory classes only; ignore a corrupt parent class. */
			if (cp_is_valid_class(1, parentclass)) {
				if (isdir) {
					target_class = parentclass;
				}
				/* Files don't inherit DIR_NONE; they keep the default. */
				else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
					target_class = parentclass;
				}
			}
		}
	}

	/* No keys yet — they are generated lazily (see cp_handle_vnop). */
	entry = cp_entry_alloc(NULL, 0, 0, NULL);
	if (entry == NULL) {
		*tmpentry = NULL;
		return ENOMEM;
	}

	entry->cp_flags = CP_NO_XATTR;
	entry->cp_pclass = target_class;
	*tmpentry = entry;

	return 0;
}
/*
 * Generate a random in-memory-only key (used for Class F files, whose
 * keys are never persisted).  Always succeeds; *pcpx receives the cpx.
 */
int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp)
{
	cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE);

	cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
	read_random(cpx_key(cpx), CP_MAX_KEYSIZE);
	cpx_set_use_offset_for_iv(cpx, true);

	*pcpx = cpx;

	return 0;
}
/* Destroy a cprotect entry; tolerates NULL for convenience. */
void
cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr)
{
	if (entry_ptr != NULL) {
		cp_entry_dealloc(hfsmp, entry_ptr);
	}
}
/* Nonzero when the mount has content protection enabled. */
int
cp_fs_protected (mount_t mnt)
{
	const uint64_t mnt_flags = vfs_flags(mnt);

	return (mnt_flags & MNT_CPROTECT);
}
/*
 * Return the cnode behind vp if it is subject to content protection
 * (eligible vnode on a protected volume), otherwise NULL.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
	if (cp_vnode_is_eligible(vp) && cp_fs_protected(VTOVFS(vp))) {
		return (struct cnode*) vp->v_data;
	}
	return NULL;
}
/*
 * Report the effective protection class of vp in *class.  Takes the
 * truncate lock (shared) then the cnode lock (shared) to read the entry
 * safely.  Returns EBADF for ineligible vnodes, ENOTSUP on unprotected
 * volumes, or a lock error; panics if the cnode has no CP entry.
 */
int
cp_vnode_getclass(struct vnode *vp, int *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/* Lock ordering: truncate lock first, then cnode lock. */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/* Every eligible cnode on a protected volume must have one. */
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	if (error == 0) {
		*class = CP_CLASS(entry->cp_pclass);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}
/*
 * Change the protection class of a file or directory.
 *
 * Regular files may need new keys: moving to Class F generates a fresh
 * random in-memory key; a file with no persistent key gets keys generated
 * in the new class; otherwise the existing key is rewrapped to the new
 * class.  Directories simply record the class.  The result is persisted
 * in the CP xattr.
 *
 * Fix: if hfs_lock() failed after the truncate lock was acquired, the
 * function returned without releasing the truncate lock, leaking it.
 *
 * Returns 0, or EINVAL/EBADF/ENOTSUP/EROFS/EPERM/ENOMEM on failure.
 */
int
cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;
	int isdir = 0;

	if (vnode_isdir (vp)) {
		isdir = 1;
	}

	/* Validate the effective class for this vnode type. */
	newclass = CP_CLASS(newclass);
	if (!cp_is_valid_class(isdir, newclass)) {
		printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
		return EINVAL;
	}

	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	/*
	 * Take the truncate lock exclusively and drain writers so no I/O is
	 * in flight while keys are being swapped.
	 */
	cp = VTOC(vp);
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		/* Fix: release the truncate lock acquired above before bailing. */
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	if (vnode_isreg(vp)) {
		/* Unwrap the current key first — rewrap needs it resident. */
		if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			error = cp_restore_keys (entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}

		if (newclass == PROTECTION_CLASS_F) {
			/* Class F is only allowed on empty files. */
			if (cp->c_datafork->ff_size > 0) {
				error = EINVAL;
				goto out;
			}

			cp_key_pair_t *cpkp;
			cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp);
			if (!new_entry) {
				error = ENOMEM;
				goto out;
			}

			new_entry->cp_pclass = newclass;
			new_entry->cp_key_os_version = cp_os_version();
			new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision);

			/* Class F keys live only in memory: random, never wrapped. */
			cpx_t cpx = cpkp_cpx(cpkp);
			cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
			read_random (cpx_key(cpx), CP_MAX_KEYSIZE);

			cp_replace_entry(hfsmp, cp, new_entry);

			error = 0;
			goto out;
		}

		/* Leaving Class F (shrinking back to a wrapped class) is not allowed. */
		if (entry->cp_pclass == PROTECTION_CLASS_F) {
			error = EPERM;
			goto out;
		}

		if (!cpkp_has_pers_key(&entry->cp_keys)) {
			/* No persistent key yet — generate directly in the new class. */
			struct cprotect *new_entry = NULL;
			uint32_t flags = 0;
			error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry);
			if (error == 0) {
				cp_replace_entry (hfsmp, cp, new_entry);
			}
			goto out;
		}

		/* Rewrap the existing persistent key to the new class. */
		cprotect_t new_entry;
		error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry,
		    (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry);
		if (error) {
			goto out;
		}

		new_entry->cp_pclass = newclass;
		cp_replace_entry(hfsmp, cp, new_entry);
		entry = new_entry;
	}
	else if (vnode_isdir(vp)) {
		/* Directories have no keys; just record the class. */
		entry->cp_pclass = newclass;
		error = 0;
	}
	else {
		error = EINVAL;
		goto out;
	}

	/* Persist the updated entry; fall back to creating the xattr. */
	error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
	if (error == ENOATTR) {
		error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
/*
 * Produce a backup ("transcoded") form of the file's wrapped key via the
 * key store's backup_key callback.  On entry *len is the capacity of
 * 'key'; on success it is updated to the emitted key length.
 *
 * Fix: if hfs_lock() failed after the truncate lock was acquired, the
 * function returned without releasing the truncate lock, leaking it.
 *
 * Returns 0, or EBADF/ENOTSUP/EINVAL/EPERM on failure.
 */
int cp_vnode_transcode(vnode_t vp, void *key, unsigned *len)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Structures passed between HFS and the key store. */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in, wrapped_key_out;

	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		/* Fix: release the truncate lock acquired above before bailing. */
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	if (vnode_isreg(vp)) {
		/* Class F has no persistent key — nothing to transcode. */
		if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
			error = EINVAL;
			goto out;
		}

		cp_init_access(&access_in, cp);

		bzero(&wrapped_key_in, sizeof(wrapped_key_in));
		bzero(&wrapped_key_out, sizeof(wrapped_key_out));

		cp_key_pair_t *cpkp = &entry->cp_keys;

		wrapped_key_in.key = cpkp_pers_key(cpkp);
		wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);

		if (!wrapped_key_in.key_len) {
			error = EINVAL;
			goto out;
		}

		wrapped_key_in.dp_class = entry->cp_pclass;

		/* Caller-supplied output buffer; *len is its capacity. */
		wrapped_key_out.key = key;
		wrapped_key_out.key_len = *len;

		error = g_cp_wrap_func.backup_key(&access_in,
		    &wrapped_key_in,
		    &wrapped_key_out);

		if(error)
			error = EPERM;
		else
			*len = wrapped_key_out.key_len;
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
/*
 * Gatekeeper called on read/write/etc. vnops: verifies the device lock
 * state allows the access, lazily generates missing keys, unwraps keys
 * into the cache as needed, and writes the CP xattr for entries created
 * before one existed (CP_NO_XATTR).
 *
 * Enters with no cnode lock; takes it shared, upgrading to exclusive if
 * key state must change.  Returns 0 when the vnop may proceed.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/* Non-eligible vnodes and unprotected volumes pass through freely. */
	if (cp_vnode_is_eligible(vp) == 0) {
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		return 0;
	}

	cp = VTOC(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;

	if (entry == NULL) {
		/* Protected regular files must have an entry; others may pass. */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	/* Re-resolve a live vnode (data fork, else resource fork). */
	vp = CTOV(cp, 0);
	if (vp == NULL) {
		vp = CTOV(cp,1);
		if (vp == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	hfsmp = VTOHFS(vp);

	if ((error = cp_check_access(cp, hfsmp, vnop))) {
		/* Raw encrypted reads are allowed even while locked. */
		if ((ioflag & IO_ENCRYPTED)
		    && (vnop == CP_READ_ACCESS)) {
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (!ISSET(entry->cp_flags, CP_NO_XATTR)) {
		if (!S_ISREG(cp->c_mode))
			goto out;
		/* Fast path: keys present and cached — nothing to do. */
		if (!cp_needs_pers_key(entry)
		    && cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			goto out;
		}
	}

	/* Key state must change: upgrade the shared lock to exclusive. */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		/* Upgrade failed and dropped the lock — reacquire exclusively. */
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		cp->c_lockowner = current_thread();
	}

	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/* Allow the key store to substitute a different class. */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
		/* Raw encrypted reads don't need the unwrapped key. */
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* First access since creation: persist the entry to the xattr. */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:
	hfs_unlock(cp);
	return error;
}
#if HFS_TMPDBG
#if !SECURE_KERNEL
/* Debug-only: log which process hit an EPERM, for which class and file. */
static void cp_log_eperm (struct vnode* vp, int pclass, boolean_t create) {
	const char *dbgop = create ? "create" : "open";
	const char *fname = (vp && vp->v_name) ? vp->v_name : "unknown";
	char procname[256] = {};
	int ppid = proc_selfpid();

	proc_selfname(procname, sizeof(procname));
	printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, ppid, pclass, dbgop, fname);
}
#endif
#endif
/*
 * Open-time content-protection check.  Generates missing keys and, for
 * classes whose keys may be unavailable while the device is locked
 * (A, B, C), verifies the key can actually be obtained — otherwise the
 * open fails with EPERM.
 *
 * Fix: the HFS_TMPDBG logging block at 'out' dereferenced 'entry' even
 * though entry may still be NULL when the EPERM came from the missing-
 * entry check above — a NULL-pointer dereference in debug builds.
 *
 * Returns 0 to allow the open, or an errno to deny it.
 */
int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL ;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* Non-eligible vnodes and unprotected volumes pass through freely. */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	cp = VTOC(vp);

	/* Raw encrypted/unencrypted access modes bypass the key checks. */
	if (ISSET(mode, FENCRYPTED)) {
		return 0;
	}
	if (ISSET(mode, FUNENCRYPTED)) {
		return 0;
	}

	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/* Protected regular files must have an entry; others may pass. */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	if (!S_ISREG(cp->c_mode))
		goto out;

	/* Generate keys now if the file was created without them. */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	switch (CP_CLASS(entry->cp_pclass)) {
	case PROTECTION_CLASS_B:
		/* A create in Class B is always permitted. */
		if (mode & O_CREAT) {
			break;
		}
		/*
		 * With a cached key, probe the wrapped key to verify access is
		 * still permitted; without one, fall through to the Class A/C
		 * restore path below.
		 */
		if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) {
			cp_cred_s access_in;
			cp_wrapped_key_s wrapped_key_in;

			cp_init_access(&access_in, cp);
			bzero(&wrapped_key_in, sizeof(wrapped_key_in));
			wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys);
			wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys);
			wrapped_key_in.dp_class = entry->cp_pclass;
			error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL);
			if (error) {
				error = EPERM;
			}
			break;
		}
		/* FALLTHROUGH */
	case PROTECTION_CLASS_A:
	case PROTECTION_CLASS_C:
		/* Keys must be obtainable right now for the open to succeed. */
		if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			error = cp_restore_keys(entry, hfsmp, cp);
		}
		if (error) {
			error = EPERM;
		}
		break;
	case PROTECTION_CLASS_D:
	default:
		break;
	}

out:

#if HFS_TMPDBG
#if !SECURE_KERNEL
	/* Fix: entry may be NULL here (EPERM from the missing-entry check). */
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM) && (entry != NULL)) {
		cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false);
	}
#endif
#endif

	hfs_unlock(cp);
	return error;
}
/*
 * Read the volume's root content-protection xattr into *outxattr,
 * byte-swapping from little-endian on-disk order.  Returns 0, an error
 * from the xattr read, or HFS_EINCONSISTENT for a short/invalid blob.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
	uio_t auio;
	char uio_buf[UIO_SIZEOF(1)];
	/* Over-allocate: future versions may append fields. */
	void *buf;
	size_t attrsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);
	int error = 0;
	struct vnop_getxattr_args args;

	if (!outxattr) {
		panic("Content Protection: cp_xattr called with xattr == NULL");
	}

	MALLOC(buf, void *, attrsize, M_TEMP, M_WAITOK);

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(buf), attrsize);

	args.a_desc = NULL; args.a_vp = NULL; args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = auio;
	args.a_size = &attrsize;
	args.a_options = XATTR_REPLACE;
	args.a_context = NULL;

	/* fileid 1 == the volume root. */
	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);
	if (error != 0) {
		goto out;
	}

	if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
		error = HFS_EINCONSISTENT;
		goto out;
	}

	const struct cp_root_xattr *xattr = buf;

	bzero(outxattr, sizeof(*outxattr));

	/* On-disk fields are little-endian; convert to host order. */
	outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

	if (outxattr->major_version >= CP_VERS_5) {
		/* Version 5+ blobs must carry the full structure. */
		if (attrsize < sizeof(struct cp_root_xattr)) {
			error = HFS_EINCONSISTENT;
			goto out;
		}
	}

out:
	uio_free(auio);
	FREE(buf, M_TEMP);
	return error;
}
/*
 * Write the volume root CP xattr.  Note: *newxattr is byte-swapped to
 * little-endian IN PLACE before writing; the host-order flags are saved
 * first so the in-memory mount state can be updated on success.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL; args.a_options = 0;
	args.a_context = NULL;

	/* Keep host-order flags for hfsmp before swapping for disk. */
	const uint32_t flags = newxattr->flags;
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	int xattr_size = sizeof(struct cp_root_xattr);

	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

	/* fileid 1 == the volume root. */
	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
	    xattr_size, &args, hfsmp, 1);

	if (!error) {
		hfsmp->cproot_flags = flags;
	}

	return error;
}
/*
 * Persist a cprotect entry as a v5 CP xattr for the given cnode (or bare
 * fileid when cp is NULL).  If the volume still runs an older CP major
 * version, the root xattr is upgraded to the current version first.
 * Clears CP_NO_XATTR on success.  Returns 0 or an errno.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
    uint32_t fileid, int options)
{
	int error = 0;
	cp_key_pair_t *cpkp = &entry->cp_keys;

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
		/* Upgrade the volume's root xattr before writing v5 blobs. */
		printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

		struct cp_root_xattr root_xattr;

		error = cp_getrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		root_xattr.major_version = CP_CURRENT_VERS;
		root_xattr.minor_version = CP_MINOR_VERS;

		error = cp_setrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
	}

	struct cp_xattr_v5 *xattr;
	MALLOC(xattr, struct cp_xattr_v5 *, sizeof(*xattr), M_TEMP, M_WAITOK);

	/* Serialize the entry in little-endian on-disk order. */
	xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
	xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
	xattr->flags = 0;
	xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
	xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
	xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

	uint16_t key_len = cpkp_pers_key_len(cpkp);
	xattr->key_len = OSSwapHostToLittleInt16(key_len);
	memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

	/* Only write up to the end of the actual key, not the full buffer. */
	size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;

	struct vnop_setxattr_args args = {
		.a_vp = cp ? cp->c_vp : NULL,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_options = options,
		.a_context = vfs_context_current(),
	};

	error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

	FREE(xattr, M_TEMP);

	if (error == 0 ) {
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}
/*
 * Report the volume's running CP major version in *level, refreshing it
 * from the on-disk root xattr first.  ENOTSUP on unprotected volumes.
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
	struct mount *mp = VTOVFS(vp);
	struct hfsmount *hfsmp = NULL;
	int err;

	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	err = cp_root_major_vers(mp);
	if (err != 0) {
		return err;
	}

	*level = hfsmp->hfs_running_cp_major_vers;
	return 0;
}
/* Report the volume's default protection class; ENOTSUP if unprotected. */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
	struct mount *mp = VTOVFS(vp);

	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	struct hfsmount *hfsmp = VFSTOHFS(mp);
	*level = hfsmp->default_cp_class;
	return 0;
}
/*
 * Refresh hfsmp->hfs_running_cp_major_vers from the on-disk root xattr.
 * Any read failure is reported as EINVAL.
 */
static int
cp_root_major_vers(mount_t mp)
{
	struct cp_root_xattr xattr;
	struct hfsmount *hfsmp = vfs_fsprivate(mp);

	if (cp_getrootxattr (hfsmp, &xattr) != 0) {
		return EINVAL;
	}

	hfsmp->hfs_running_cp_major_vers = xattr.major_version;
	return 0;
}
/*
 * A vnode participates in content protection only when it is an HFS
 * vnode, not a system file, and a regular file or directory.
 */
static int
cp_vnode_is_eligible(struct vnode *vp)
{
	if (vp->v_op != hfs_vnodeop_p) {
		return 0;
	}
	if (vnode_issystem(vp)) {
		return 0;
	}
	return (vnode_isreg(vp) || vnode_isdir(vp));
}
/*
 * Validate a protection class for the vnode type: directories accept
 * DIR_NONE..D, regular files accept A..F.
 */
int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
	const int32_t lowest = isdir ? PROTECTION_CLASS_DIR_NONE : PROTECTION_CLASS_A;
	const int32_t highest = isdir ? PROTECTION_CLASS_D : PROTECTION_CLASS_F;

	return (protectionclass >= lowest && protectionclass <= highest);
}
/*
 * Guard words bracketing every cprotect entry in DEBUG builds ("cpr{" ...
 * "}cpr" in little-endian ASCII).  Fix: the original packed both
 * declarations and the #endif onto one line — a preprocessor directive
 * must begin its own line.
 */
#if DEBUG
static const uint32_t cp_magic1 = 0x7b727063;	/* "cpr{" */
static const uint32_t cp_magic2 = 0x7270637d;	/* "}cpr" */
#endif
/*
 * Allocate a cprotect entry sized for the given persistent and cached key
 * lengths.  When 'old' is supplied, its header fields and cached key are
 * copied into the new entry.  The embedded key pair is returned via
 * *pcpkp when requested.  Returns NULL only when pers_key_len exceeds the
 * maximum wrapped-key size (M_WAITOK allocation cannot fail).
 *
 * Fix: the original had "size += 4; #endif" on one line — a preprocessor
 * directive must begin its own line, so that did not compile.
 */
struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
    uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
	struct cprotect *cp_entry;

	if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
	    + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
	size += 4;		/* room for the trailing magic2 guard word */
#endif

	MALLOC(cp_entry, struct cprotect *, size, M_TEMP, M_WAITOK);

	if (old) {
		memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));
	} else {
		bzero(cp_entry, offsetof(struct cprotect, cp_keys));
	}

#if DEBUG
	cp_entry->cp_magic1 = cp_magic1;
	*PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

	cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

	/* Carry the cached (unwrapped) key over from the old entry. */
	if (old)
		cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

	if (pcpkp)
		*pcpkp = &cp_entry->cp_keys;

	return cp_entry;
}
/*
 * Scrub and free a cprotect entry.  DEBUG builds verify the guard words
 * planted by cp_entry_alloc() before release.
 *
 * Fix: the trailing-guard assert was misparenthesized — the
 * "== cp_magic2" comparison was inside the PTR_ADD offset expression, so
 * the assert dereferenced a bogus address and never actually compared
 * against the guard word.
 */
static void
cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry)
{
	cpkp_flush(&entry->cp_keys);

#if DEBUG
	assert(entry->cp_magic1 == cp_magic1);
	assert(*PTR_ADD(uint32_t *, entry, (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
	    + cpkp_sizex(&entry->cp_keys))) == cp_magic2);
#endif

	FREE(entry, M_TEMP);
}
/*
 * Parse a v4 CP xattr (byte-swapped IN PLACE from little-endian) into a
 * cprotect entry.  With CP_GET_XATTR_BASIC_INFO the caller supplies the
 * entry in *pcpr and only metadata (no key bytes) is filled in;
 * otherwise a new entry is allocated and returned via *pcpr.
 * Returns 0 or HFS_EINCONSISTENT for malformed blobs.
 */
static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
    size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
	/* Convert on-disk little-endian fields to host order, in place. */
	xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
	xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
	xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
	xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
	xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
	xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);

	/* Sanity-check the declared key size against limits and blob length. */
	if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
		return HFS_EINCONSISTENT;

	size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	/* Class F or needs-keys entries carry no usable persistent key. */
	if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
	    || ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
		xattr->key_size = 0;
	}

	cp_key_pair_t *cpkp;
	cprotect_t entry;

	if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		/* Caller-provided entry: metadata only, no key storage. */
		entry = *pcpr;
		bzero(entry, offsetof(struct cprotect, cp_keys));
	}
	else {
		entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
	}

	entry->cp_pclass = xattr->persistent_class;
	entry->cp_key_os_version = xattr->key_os_version;

	if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		if (xattr->key_size) {
			cpkp_set_pers_key_len(cpkp, xattr->key_size);
			memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
		}

		*pcpr = entry;
	}
	else if (xattr->key_size) {
		/* Basic-info mode: just flag that a key exists on disk. */
		SET(entry->cp_flags, CP_HAS_A_KEY);
	}

	return 0;
}
/*
 * Parse a v5 CP xattr (byte-swapped IN PLACE) into a cprotect entry,
 * delegating v4 blobs to cp_read_xattr_v4().  Option semantics match
 * cp_read_xattr_v4().  Returns 0, ENOTSUP for unknown versions, or
 * HFS_EINCONSISTENT for malformed blobs.
 */
int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr,
    size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
	/* v4 blobs are handled by the legacy parser. */
	if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) {
		return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options);
	}

	xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);

	if (xattr->xattr_major_version != CP_VERS_5) {
		printf("hfs: cp_getxattr: unsupported xattr version %d\n",
		    xattr->xattr_major_version);
		return ENOTSUP;
	}

	size_t min_len = offsetof(struct cp_xattr_v5, persistent_key);

	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	/* Convert remaining on-disk little-endian fields, in place. */
	xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
	xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
	xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
	xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
	xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision);
	xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len);

	uint16_t pers_key_len = xattr->key_len;

	/* The blob must be long enough to contain the declared key. */
	min_len += pers_key_len;
	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	cp_key_pair_t *cpkp;
	cprotect_t entry;

	if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		/* Caller-provided entry: metadata only, no key storage. */
		entry = *pcpr;
		bzero(entry, offsetof(struct cprotect, cp_keys));
	} else {
		entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp);
	}

	entry->cp_pclass = xattr->persistent_class;
	entry->cp_key_os_version = xattr->key_os_version;
	entry->cp_key_revision = xattr->key_revision;

	if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		if (xattr->key_len) {
			cpkp_set_pers_key_len(cpkp, xattr->key_len);
			memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len);
		}

		*pcpr = entry;
	}
	else if (xattr->key_len) {
		/* Basic-info mode: just flag that a key exists on disk. */
		SET(entry->cp_flags, CP_HAS_A_KEY);
	}

	return 0;
}
/*
 * Read and parse a cnode's CP xattr into a newly allocated entry via
 * *outentry.  Returns 0, ENOATTR when no xattr exists, or a parse error
 * (DEBUG builds hex-dump unparsable blobs).
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
{
	size_t xattr_len = sizeof(struct cp_xattr_v5);
	struct cp_xattr_v5 *xattr;

	MALLOC (xattr, struct cp_xattr_v5 *, xattr_len,
	    M_TEMP, M_WAITOK);

	int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
	    xattr, &xattr_len);

	if (!error) {
		if (xattr_len < CP_XATTR_MIN_LEN)
			error = HFS_EINCONSISTENT;
		else
			error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
	}

#if DEBUG
	if (error && error != ENOATTR) {
		printf("cp_getxattr: bad cp xattr (%d):\n", error);
		for (size_t i = 0; i < xattr_len; ++i)
			printf("%02x ", ((uint8_t *)xattr)[i]);
		printf("\n");
	}
#endif

	FREE(xattr, M_TEMP);

	return error;
}
static int
cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
{
int error = 0;
error = cp_unwrap(hfsmp, entry, cp);
if (error) {
cp_flush_cached_keys(entry);
error = EPERM;
}
return error;
}
/*
 * Per-mount callback for cp_key_store_action(): record the new lock
 * state on the mount and, when locking, sweep every vnode to flush
 * Class A keys (see cp_lock_vnode_callback()).
 */
static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
	unsigned long new_state;
	struct hfsmount *hfsmp;

	/* Unprotected volumes are unaffected by lock-state changes. */
	if (!cp_fs_protected(mp)) {
		return 0;
	}

	new_state = (unsigned long) arg;

	hfsmp = VFSTOHFS(mp);

	hfs_lock_mount(hfsmp);
	hfsmp->hfs_cp_lock_state = (uint8_t) new_state;
	hfs_unlock_mount(hfsmp);

	if (new_state == CP_LOCKED_STATE) {
		/* Flush cached Class A keys from every vnode on this mount. */
		return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
	}

	return 0;
}
/*
 * Decide whether a vnop may proceed given the mount's lock state: while
 * the device is locked, only Class A regular files are denied (EPERM);
 * everything else is allowed.
 */
static int
cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
{
	if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
		return 0;
	}

	/* Entries are only enforced on regular files. */
	if (!cp->c_cpentry || !S_ISREG(cp->c_mode)) {
		return 0;
	}

	if (CP_CLASS(cp->c_cpentry->cp_pclass) == PROTECTION_CLASS_A) {
		return EPERM;
	}

	return 0;
}
/*
 * Per-vnode callback for the device-lock sweep.  On CP_LOCKED_STATE,
 * Class A regular files are finalized, their dirty pages pushed and
 * invalidated, and their cached keys scrubbed so the data is
 * inaccessible until unlock.  Other classes and directories are
 * untouched; CP_UNLOCKED_STATE requires no per-vnode work.
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
	cnode_t *cp = NULL;
	struct cprotect *entry = NULL;

	int error = 0;
	int locked = 1;
	unsigned long action = 0;
	int took_truncate_lock = 0;

	error = vnode_getwithref (vp);
	if (error) {
		return error;
	}

	cp = VTOC(vp);

	/* Exclusive truncate lock blocks concurrent I/O during the flush. */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	entry = cp->c_cpentry;
	if (!entry) {
		goto out;
	}

	action = (unsigned long) arg;
	switch (action) {
	case CP_LOCKED_STATE: {
		vfs_context_t ctx;
		/* Only Class A files lose access while the device is locked. */
		if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
		    vnode_isdir(vp)) {
			goto out;
		}

		ctx = vfs_context_current();

		(void) hfs_filedone (vp, ctx, 0);

		/* Drop the cnode lock around msync to avoid deadlocking pageout. */
		hfs_unlock (cp);

		/* Push dirty pages to disk while the key is still available. */
		ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);

		hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

		cp_flush_cached_keys(entry);

		hfs_unlock(cp);
		locked = 0;

		/* Drop any pages brought back in between the two msyncs. */
		ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
		break;
	}
	case CP_UNLOCKED_STATE: {
		/* Keys are restored lazily on next access; nothing to do. */
		break;
	}
	default:
		panic("Content Protection: unknown lock action %lu\n", action);
	}

out:
	if (locked) {
		hfs_unlock(cp);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}

	vnode_put (vp);
	return error;
}
/*
 * Rewrap the persistent key in 'cpkp' to the target class *newclass,
 * allocating a fresh key holder through alloc_fn into *pholder.  The raw
 * key never appears here: the keystore transforms wrapped-key to
 * wrapped-key directly.  On success *newclass is updated to the class the
 * keystore actually used.  Class F is rejected (it has no persistent key).
 */
int
cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
cp_new_alloc_fn alloc_fn, void **pholder)
{
struct cprotect *entry = cp->c_cpentry;
uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
int error = 0;
const cp_key_class_t key_class = CP_CLASS(*newclass);
cp_cred_s access_in;
cp_wrapped_key_s wrapped_key_in;
cp_wrapped_key_s wrapped_key_out;
/* Class F keys are ephemeral and have no wrapped form to rewrap. */
if (key_class == PROTECTION_CLASS_F) {
return EINVAL;
}
cp_init_access(&access_in, cp);
bzero(&wrapped_key_in, sizeof(wrapped_key_in));
wrapped_key_in.key = cpkp_pers_key(cpkp);
wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
/* The keystore needs the CURRENT class to unwrap the old blob. */
wrapped_key_in.dp_class = entry->cp_pclass;
bzero(&wrapped_key_out, sizeof(wrapped_key_out));
wrapped_key_out.key = new_persistent_key;
wrapped_key_out.key_len = keylen;
error = g_cp_wrap_func.rewrapper(&access_in,
key_class,
&wrapped_key_in,
&wrapped_key_out);
/* key_len now reflects the size of the newly wrapped blob. */
keylen = wrapped_key_out.key_len;
if (error == 0) {
/*
 * The keystore may return a class with modifier bits; only the base
 * class must match what was requested.
 */
cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
if (effective != key_class) {
return EPERM;
}
cp_key_pair_t *new_cpkp;
/* NOTE(review): alloc_fn result is not NULL-checked before use —
   presumably the allocator blocks (M_WAITOK) and cannot fail; confirm. */
*pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);
cpkp_set_pers_key_len(new_cpkp, keylen);
memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);
*newclass = wrapped_key_out.dp_class;
}
else {
/* Collapse all keystore failures into EPERM for callers. */
error = EPERM;
}
return error;
}
/*
 * Unwrap the persistent key in 'cpkp' for protection class 'key_class' via
 * the registered keystore, caching the resulting raw key and IV key in the
 * key pair's cpx.  Returns 0 on success or EPERM if the keystore refuses.
 *
 * Fix: the panic format strings used "%ul", which printf parses as %u
 * followed by a literal 'l' — the length printed would be wrong on any
 * platform where the argument isn't plain unsigned.  Use %lu with an
 * explicit cast instead.
 */
static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
{
	int error = 0;
	uint8_t iv_key[CP_IV_KEYSIZE];
	cpx_t cpx = cpkp_cpx(cpkp);

	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_raw_key_s key_out;

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = cpkp_pers_key(cpkp);
	/* Pass the buffer capacity; the keystore validates the blob itself. */
	wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
	wrapped_key_in.dp_class = key_class;

	bzero(&key_out, sizeof(key_out));
	key_out.iv_key = iv_key;
	key_out.key = cpx_key(cpx);
	key_out.iv_key_len = CP_IV_KEYSIZE;
	key_out.key_len = cpx_max_key_len(cpx);

	error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
	if (!error) {
		/* Sanity-check what the keystore handed back before caching it. */
		if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
			panic ("cp_unwrap: invalid key length! (%lu)\n",
				(unsigned long)key_out.key_len);
		}
		if (key_out.iv_key_len != CP_IV_KEYSIZE)
			panic ("cp_unwrap: invalid iv key length! (%lu)\n",
				(unsigned long)key_out.iv_key_len);

		cpx_set_key_len(cpx, key_out.key_len);
		cpx_set_aes_iv_key(cpx, iv_key);
		cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, CP_RAW_KEY_WRAPPEDKEY));
	} else {
		/* Collapse all keystore failures into EPERM for callers. */
		error = EPERM;
	}

	return error;
}
/*
 * Unwrap the keys for 'entry'.  Class F keys are never persisted, so there
 * is nothing to unwrap and EPERM is returned for them.
 */
static int
cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
{
	if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F)
		return EPERM;

	return cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys);
}
/*
 * Generate new protection keys for 'cp' at class 'targetclass' and persist
 * them to the content-protection xattr.  On success the freshly allocated
 * entry is returned through *newentry (caller installs it).  Returns 0 when
 * the cnode has no protection entry at all, EINVAL when new keys are not
 * needed or not possible, or EPERM/cp_setxattr errors otherwise.
 */
int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
		uint32_t keyflags, struct cprotect **newentry)
{
	struct cprotect *fresh_entry = NULL;
	int error = 0;

	*newentry = NULL;
	targetclass = CP_CLASS(targetclass);

	/* No protection entry: nothing to generate. */
	if (cp->c_cpentry == NULL)
		return 0;

	if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
		error = EINVAL;
		goto out;
	}

	/* Regular files only get new keys when none are present yet. */
	if (S_ISREG(cp->c_mode) && !cp_needs_pers_key(cp->c_cpentry)) {
		error = EINVAL;
		goto out;
	}

	cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

	error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
			(cp_new_alloc_fn)cp_entry_alloc, (void **)&fresh_entry);
	if (error) {
		error = EPERM;
		goto out;
	}

	fresh_entry->cp_pclass = targetclass;
	fresh_entry->cp_key_os_version = cp_os_version();
	fresh_entry->cp_key_revision = key_revision;

	/* Persist the new keys; on failure tear the new entry back down. */
	error = cp_setxattr (cp, fresh_entry, hfsmp, cp->c_fileid, XATTR_REPLACE);
	if (error) {
		cp_entry_destroy(hfsmp, fresh_entry);
		goto out;
	}

	*newentry = fresh_entry;

out:
	return error;
}
/*
 * Install 'newentry' as the cnode's protection entry, destroying any entry
 * that was previously attached, and back-link the entry to its cnode.
 */
void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry)
{
	struct cprotect *old_entry = cp->c_cpentry;

	if (old_entry != NULL)
		cp_entry_destroy (hfsmp, old_entry);

	cp->c_cpentry = newentry;
	newentry->cp_backing_cnode = cp;
}
/*
 * Generate a new set of keys at class *newclass_eff for a file or directory
 * being created.  Directories record only a class (no key material); class F
 * regular files get a random ephemeral key with no wrapped/persistent form;
 * every other class gets both a raw key/IV and a wrapped persistent key from
 * the registered keystore.
 *
 * On success *pholder is a new holder allocated via alloc_fn with the keys
 * installed, and *newclass_eff is updated to the class the keystore actually
 * wrapped to (which may differ when CP_KEYWRAP_DIFFCLASS is allowed).
 * Returns 0, ENXIO if the keystore has not registered, EPERM on keystore
 * refusal or class mismatch, or ENOMEM if allocation fails.
 *
 * Fix: the panic format strings used "%ul", which printf parses as %u plus
 * a literal 'l'; replaced with %lu and an explicit cast.
 */
int
cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp,
	mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision,
	cp_new_alloc_fn alloc_fn, void **pholder)
{
	int error = 0;
	uint8_t new_key[CP_MAX_CACHEBUFLEN];
	size_t new_key_len = CP_MAX_CACHEBUFLEN;
	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
	uint8_t iv_key[CP_IV_KEYSIZE];
	size_t iv_key_len = CP_IV_KEYSIZE;
	int iswrapped = 0;
	cp_key_class_t key_class = CP_CLASS(*newclass_eff);

	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_out;
	cp_raw_key_s key_out;

	/* The keystore registers its callbacks at boot; fail cleanly if it
	   hasn't done so yet. */
	if (are_wraps_initialized == false) {
		printf("hfs: cp_new: wrap/gen functions not yet set\n");
		return ENXIO;
	}

	/* Only regular files and directories carry protection state. */
	if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
		return EPERM;
	}

	if (S_ISDIR (cmode)) {
		/* Directories record a class only; no key material at all. */
		new_persistent_len = 0;
		new_key_len = 0;
		error = 0;
	}
	else {
		if (key_class == PROTECTION_CLASS_F) {
			/* Class F: random ephemeral key, never persisted. */
			new_key_len = CP_MAX_KEYSIZE;
			read_random (&new_key[0], new_key_len);
			new_persistent_len = 0;
			error = 0;
		}
		else {
			cp_init_access(&access_in, cp);

			bzero(&key_out, sizeof(key_out));
			key_out.key = new_key;
			key_out.iv_key = iv_key;
			key_out.key_len = new_key_len;
			key_out.iv_key_len = iv_key_len;

			bzero(&wrapped_key_out, sizeof(wrapped_key_out));
			wrapped_key_out.key = new_persistent_key;
			wrapped_key_out.key_len = new_persistent_len;

			access_in.key_revision = key_revision;

			error = g_cp_wrap_func.new_key(&access_in,
					key_class,
					&key_out,
					&wrapped_key_out);
			if (error) {
				/* Keystore refused (e.g. device locked for class A). */
				error = EPERM;
				goto cpnew_fail;
			}

			/* Sanity-check what the keystore produced before using it. */
			if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
				panic ("cp_new: invalid key length! (%lu) \n",
					(unsigned long)key_out.key_len);
			}
			if (key_out.iv_key_len != CP_IV_KEYSIZE) {
				panic ("cp_new: invalid iv key length! (%lu) \n",
					(unsigned long)key_out.iv_key_len);
			}

			/* The keystore may wrap to a different class; allow that only
			   when the caller opted in with CP_KEYWRAP_DIFFCLASS. */
			if (CP_CLASS(wrapped_key_out.dp_class) != key_class) {
				if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) {
					error = EPERM;
					goto cpnew_fail;
				}
			}

			*newclass_eff = wrapped_key_out.dp_class;
			new_key_len = key_out.key_len;
			iv_key_len = key_out.iv_key_len;
			new_persistent_len = wrapped_key_out.key_len;

			if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) {
				iswrapped = 1;
			}
		}
	}

	/* Allocate the holder and install whatever key material we produced. */
	cp_key_pair_t *cpkp;
	*pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp);
	if (*pholder == NULL) {
		return ENOMEM;
	}

	if (new_key_len > 0) {
		cpx_t cpx = cpkp_cpx(cpkp);

		cpx_set_key_len(cpx, new_key_len);
		memcpy(cpx_key(cpx), new_key, new_key_len);

		/* Class F has no IV key from the keystore. */
		if (key_class != PROTECTION_CLASS_F)
			cpx_set_aes_iv_key(cpx, iv_key);
		cpx_set_is_sep_wrapped_key(cpx, iswrapped);
	}
	if (new_persistent_len > 0) {
		cpkp_set_pers_key_len(cpkp, new_persistent_len);
		memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len);
	}

cpnew_fail:

#if HFS_TMPDBG
#if !SECURE_KERNEL
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
		/* Only log on the EPERM failure paths above. */
		cp_log_eperm (cp->c_vp, *newclass_eff, true);
	}
#endif
#endif

	return error;
}
/*
 * Populate a cp_cred_s describing the file and the calling thread's
 * process/user identity, for handing to the keystore callbacks.  The key
 * revision is carried over from an existing protection entry if present.
 */
static void cp_init_access(cp_cred_t access, struct cnode *cp)
{
	vfs_context_t ctx = vfs_context_current();

	bzero(access, sizeof(*access));

	access->inode = cp->c_fileid;
	access->pid = proc_pid(vfs_context_proc(ctx));
	access->uid = kauth_cred_getuid(vfs_context_ucred(ctx));

	if (cp->c_cpentry != NULL)
		access->key_revision = cp->c_cpentry->cp_key_revision;
}
/*
 * Parse the global osversion string ("<major digits><letter><build digits>",
 * e.g. "16A323") into the packed cp_key_os_version_t form:
 * major in the top byte, the letter in the next 8 bits, and the build
 * number in the low 16 bits.  Returns 0 if any component is missing/zero.
 */
static cp_key_os_version_t parse_os_version(void)
{
	const char *cursor = osversion;

	/* Leading decimal run: the major version. */
	int major = 0;
	for (; *cursor >= '0' && *cursor <= '9'; ++cursor)
		major = major * 10 + (*cursor - '0');
	if (major == 0)
		return 0;

	/* Single build letter separating major from the build number. */
	int letter = *cursor++;
	if (letter == 0)
		return 0;

	/* Trailing decimal run: the build number. */
	int build = 0;
	for (; *cursor >= '0' && *cursor <= '9'; ++cursor)
		build = build * 10 + (*cursor - '0');
	if (build == 0)
		return 0;

	return (major & 0xff) << 24 | letter << 16 | (build & 0xffff);
}
/*
 * Return the packed OS version, computing and caching it on first use.
 * Returns 0 while the global osversion string is still empty (early boot);
 * if the string is present but unparseable, logs once per computation and
 * falls back to the sentinel value 1.
 */
cp_key_os_version_t cp_os_version(void)
{
	static cp_key_os_version_t cached_version;

	if (cached_version != 0)
		return cached_version;

	/* osversion is populated late in boot; report 0 until then. */
	if (osversion[0] == '\0')
		return 0;

	cached_version = parse_os_version();
	if (cached_version == 0) {
		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
		cached_version = 1;
	}

	return cached_version;
}
/*
 * Strategy-path hook: attach the encryption state (cpx) to a buffer before
 * it is submitted for I/O.  Fast paths: raw-encrypted buffers, unprotected
 * cnodes, and in-flight relocations pass through untouched; if the file's
 * keys are already cached the cpx is attached without taking the cnode
 * lock.  Slow path: compute per-I/O crypto params under the cnode lock and,
 * if keys are missing, fault them in via cp_handle_vnop() and retry.
 */
errno_t cp_handle_strategy(buf_t bp)
{
vnode_t vp = buf_vnode(bp);
cnode_t *cp = NULL;
/* Raw-encrypted I/O and unprotected files need no cpx attached here. */
if (bufattr_rawencrypted(buf_attr(bp))
|| !(cp = cp_get_protected_cnode(vp))
|| !cp->c_cpentry) {
return 0;
}
/* Relocation handles its own encryption state. */
if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
return 0;
{
/* Lockless fast path: keys already cached on the cnode. */
cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);
if (cpx_has_key(cpx)) {
bufattr_setcpx(buf_attr(bp), cpx);
return 0;
}
}
/* Encode file offset and resource-fork flag for cp_io_params. */
off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
VNODE_IS_RSRC(vp));
cp_io_params_t io_params;
/* NOTE(review): writedone here balances a startwrite taken by the caller;
   it is re-taken below before the buffer proceeds — confirm pairing. */
if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
vnode_writedone(vp);
hfs_lock_always(cp, HFS_SHARED_LOCK);
cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
&io_params);
hfs_unlock(cp);
if (!cpx_has_key(io_params.cpx)) {
/* Keys not cached: go through the vnop path to unwrap them. */
int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
errno_t error = cp_handle_vnop(vp, io_op, 0);
if (error) {
/* Restore the write count before failing the buffer. */
if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
vnode_startwrite(vp);
buf_seterror (bp, error);
buf_biodone(bp);
return error;
}
/* Recompute params now that the keys should be available. */
hfs_lock_always(cp, HFS_SHARED_LOCK);
cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
&io_params);
hfs_unlock(cp);
}
assert(buf_count(bp) <= io_params.max_len);
bufattr_setcpx(buf_attr(bp), io_params.cpx);
if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
vnode_startwrite(vp);
return 0;
}
#else // !CONFIG_PROTECT
#include <sys/cdefs.h>
#include <sys/cprotect.h>
#include <sys/errno.h>
/* Stub: content protection is compiled out; key-store actions are unsupported. */
int cp_key_store_action(int action)
{
	(void)action;
	return ENOTSUP;
}
/* Stub: no keystore callbacks can be registered without CONFIG_PROTECT. */
int cp_register_wraps(cp_wrap_func_t key_store_func)
{
	(void)key_store_func;
	return ENOTSUP;
}
/* Stub: cpx objects do not exist without CONFIG_PROTECT; size is always 0. */
size_t cpx_size(size_t key_size)
{
	(void)key_size;
	return 0;
}
cpx_t cpx_alloc(__unused size_t key_size)
{
return NULL;
}
void cpx_free(__unused cpx_t cpx)
{
}
bool cpx_is_sep_wrapped_key(__unused const struct cpx *cpx)
{
return false;
}
void cpx_set_is_sep_wrapped_key(__unused struct cpx *cpx, __unused bool v)
{
}
bool cpx_use_offset_for_iv(__unused const struct cpx *cpx)
{
return false;
}
void cpx_set_use_offset_for_iv(__unused struct cpx *cpx, __unused bool v)
{
}
/* Stub: there is never any key material, so the length is always 0. */
uint16_t cpx_key_len(const struct cpx *cpx)
{
	(void)cpx;
	return 0;
}
void cpx_set_key_len(__unused struct cpx *cpx, __unused uint16_t key_len)
{
}
void *cpx_key(__unused const struct cpx *cpx)
{
return NULL;
}
aes_encrypt_ctx *cpx_iv_aes_ctx(__unused cpx_t cpx)
{
return NULL;
}
#endif