#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>
#include <security/mac_framework.h>
#include <stdbool.h>
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
boolean_t is_signed);
extern boolean_t memory_object_is_slid(memory_object_control_t control);
extern boolean_t memory_object_is_signed(memory_object_control_t);
extern void Debugger(const char *message);
kern_return_t ubc_page_op_with_control(
memory_object_control_t control,
off_t f_offset,
int ops,
ppnum_t *phys_entryp,
int *flagsp);
#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond) \
((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);
struct zone *ubc_info_zone;
static uint32_t cs_blob_generation_count = 1;
extern int cs_debug;
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K ((1<<PAGE_SHIFT_4K))
#define PAGE_MASK_4K ((PAGE_SIZE_4K-1))
#define round_page_4K(x) (((vm_offset_t)(x) + PAGE_MASK_4K) & ~((vm_offset_t)PAGE_MASK_4K))
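/*
 * cs_valid_range: check that [start, end) is a sane, non-wrapping range that
 * lies entirely within [lower_bound, upper_bound).  Used below to bounds-check
 * every pointer derived from untrusted code-signature data.
 */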
static boolean_t
cs_valid_range(
const void *start,
const void *end,
const void *lower_bound,
const void *upper_bound)
{
if (upper_bound < lower_bound ||
end < start) {
return FALSE;
}
if (start < lower_bound ||
end > upper_bound) {
return FALSE;
}
return TRUE;
}
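/*
 * findCodeDirectory: locate the CodeDirectory inside a code-signature blob.
 * If the blob is an embedded-signature SuperBlob, walk its index for the
 * CSSLOT_CODEDIRECTORY entry; otherwise treat the blob itself as the
 * CodeDirectory.  Every access is bounds-checked against
 * [lower_bound, upper_bound) and NULL is returned on any inconsistency.
 */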
const CS_CodeDirectory *
findCodeDirectory(
const CS_SuperBlob *embedded,
char *lower_bound,
char *upper_bound)
{
const CS_CodeDirectory *cd = NULL;
if (embedded &&
cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
const CS_BlobIndex *limit;
const CS_BlobIndex *p;
limit = &embedded->index[ntohl(embedded->count)];
if (!cs_valid_range(&embedded->index[0], limit,
lower_bound, upper_bound)) {
return NULL;
}
for (p = embedded->index; p < limit; ++p) {
if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
const unsigned char *base;
base = (const unsigned char *)embedded;
cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
break;
}
}
} else {
cd = (const CS_CodeDirectory *) embedded;
}
if (cd &&
cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
lower_bound, upper_bound) &&
cs_valid_range(cd, (const char *) cd + ntohl(cd->hashOffset),
lower_bound, upper_bound) &&
cs_valid_range(cd, (const char *) cd +
ntohl(cd->hashOffset) +
(ntohl(cd->nCodeSlots) * SHA1_RESULTLEN),
lower_bound, upper_bound) &&
ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
return cd;
}
return NULL;
}
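/*
 * hashes: return a pointer to the SHA-1 hash slot covering "page".
 * For code directories with a scatter vector (CS_SUPPORTSSCATTER), walk the
 * scatter entries to find the one whose [base, base+count) range contains the
 * page; otherwise index directly into the hash array.  Returns NULL when the
 * page has no hash or the slot would fall outside the blob.
 */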
static const unsigned char *
hashes(
const CS_CodeDirectory *cd,
unsigned page,
char *lower_bound,
char *upper_bound)
{
const unsigned char *base, *top, *hash;
uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
const SC_Scatter *scatter = (const SC_Scatter*)
((const char*)cd + ntohl(cd->scatterOffset));
uint32_t hashindex=0, scount, sbase=0;
do {
if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
if(cs_debug) {
printf("CODE SIGNING: Scatter extends past Code Directory\n");
}
return NULL;
}
scount = ntohl(scatter->count);
uint32_t new_base = ntohl(scatter->base);
if (scount == 0) {
return NULL;
}
if((hashindex > 0) && (new_base <= sbase)) {
if(cs_debug) {
printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
sbase, new_base);
}
return NULL;
}
sbase = new_base;
if (sbase > page) {
return NULL;
}
if (sbase+scount >= page) {
base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
hashindex * SHA1_RESULTLEN;
top = base + scount * SHA1_RESULTLEN;
if (!cs_valid_range(base, top, lower_bound,
upper_bound) ||
hashindex > nCodeSlots) {
return NULL;
}
break;
}
hashindex+=scount;
scatter++;
} while(1);
hash = base + (page - sbase) * SHA1_RESULTLEN;
} else {
base = (const unsigned char *)cd + ntohl(cd->hashOffset);
top = base + nCodeSlots * SHA1_RESULTLEN;
if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
page > nCodeSlots) {
return NULL;
}
assert(page < nCodeSlots);
hash = base + page * SHA1_RESULTLEN;
}
if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
lower_bound, upper_bound)) {
hash = NULL;
}
return hash;
}
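/*
 * cs_validate_codedirectory: sanity-check a CodeDirectory of "length" bytes:
 * magic, SHA-1 hash type/size, 4K page size, and the hash, identifier,
 * team-id and (if present) scatter offsets must all fit within the blob.
 * Returns 0 if the CodeDirectory looks well-formed, EBADEXEC otherwise.
 */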
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
if (length < sizeof(*cd))
return EBADEXEC;
if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
return EBADEXEC;
if (cd->hashSize != SHA1_RESULTLEN)
return EBADEXEC;
if (cd->pageSize != PAGE_SHIFT_4K)
return EBADEXEC;
if (cd->hashType != CS_HASHTYPE_SHA1)
return EBADEXEC;
if (length < ntohl(cd->hashOffset))
return EBADEXEC;
if (ntohl(cd->hashOffset) / SHA1_RESULTLEN < ntohl(cd->nSpecialSlots))
return EBADEXEC;
if ((length - ntohl(cd->hashOffset)) / SHA1_RESULTLEN < ntohl(cd->nCodeSlots))
return EBADEXEC;
if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
if (length < ntohl(cd->scatterOffset))
return EBADEXEC;
SC_Scatter *scatter = (SC_Scatter *)
(((uint8_t *)cd) + ntohl(cd->scatterOffset));
uint32_t nPages = 0;
while(1) {
if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
return EBADEXEC;
uint32_t scount = ntohl(scatter->count);
if (scount == 0)
break;
if (nPages + scount < nPages)
return EBADEXEC;
nPages += scount;
scatter++;
}
#if 0
if (nPages != ntohl(cd->nCodeSlots))
return EBADEXEC;
#endif
}
if (length < ntohl(cd->identOffset))
return EBADEXEC;
if (cd->identOffset) {
uint8_t *ptr = (uint8_t *)cd + ntohl(cd->identOffset);
if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
return EBADEXEC;
}
if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
if (length < ntohl(cd->teamOffset))
return EBADEXEC;
uint8_t *ptr = (uint8_t *)cd + ntohl(cd->teamOffset);
if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
return EBADEXEC;
}
return 0;
}
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
return EBADEXEC;
return 0;
}
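/*
 * cs_validate_csblob: validate a raw code-signature blob of "length" bytes.
 * For an embedded-signature SuperBlob, validate each sub-blob and any
 * CodeDirectory it contains; for a bare CodeDirectory, validate it directly.
 * On success *rcd points at a validated CodeDirectory.
 */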
static int
cs_validate_csblob(const uint8_t *addr, size_t length,
const CS_CodeDirectory **rcd)
{
const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;
int error;
*rcd = NULL;
error = cs_validate_blob(blob, length);
if (error)
return error;
length = ntohl(blob->length);
if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
uint32_t n, count = ntohl(sb->count);
if (length < sizeof(CS_SuperBlob))
return EBADEXEC;
if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
return EBADEXEC;
for (n = 0; n < count; n++) {
const CS_BlobIndex *blobIndex = &sb->index[n];
if (length < ntohl(blobIndex->offset))
return EBADEXEC;
const CS_GenericBlob *subBlob =
(const CS_GenericBlob *)(void *)(addr + ntohl(blobIndex->offset));
size_t subLength = length - ntohl(blobIndex->offset);
if ((error = cs_validate_blob(subBlob, subLength)) != 0)
return error;
subLength = ntohl(subBlob->length);
if (ntohl(blobIndex->type) == CSSLOT_CODEDIRECTORY) {
const CS_CodeDirectory *cd = (const CS_CodeDirectory *)subBlob;
if ((error = cs_validate_codedirectory(cd, subLength)) != 0)
return error;
*rcd = cd;
}
}
} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(void *)addr, length)) != 0)
return error;
*rcd = (const CS_CodeDirectory *)blob;
} else {
return EBADEXEC;
}
if (*rcd == NULL)
return EBADEXEC;
return 0;
}
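/*
 * cs_find_blob_bytes / cs_find_blob: look up a sub-blob with the given slot
 * type and magic inside a code-signature blob.  cs_find_blob() additionally
 * refuses to search blobs that were never marked CS_VALID.
 */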
static const CS_GenericBlob *
cs_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;
if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
size_t n, count = ntohl(sb->count);
for (n = 0; n < count; n++) {
if (ntohl(sb->index[n].type) != type)
continue;
uint32_t offset = ntohl(sb->index[n].offset);
if (length - sizeof(const CS_GenericBlob) < offset)
return NULL;
blob = (const CS_GenericBlob *)(void *)(addr + offset);
if (ntohl(blob->magic) != magic)
continue;
return blob;
}
} else if (type == CSSLOT_CODEDIRECTORY
&& ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
&& magic == CSMAGIC_CODEDIRECTORY)
return blob;
return NULL;
}
const CS_GenericBlob *
cs_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
if ((csblob->csb_flags & CS_VALID) == 0)
return NULL;
return cs_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}
static const uint8_t *
cs_find_special_slot(const CS_CodeDirectory *cd, uint32_t slot)
{
if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
return NULL;
return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (SHA1_RESULTLEN * slot));
}
static uint8_t sha1_zero[SHA1_RESULTLEN] = { 0 };
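/*
 * cs_entitlements_blob_get: return the entitlements blob of the process'
 * text vnode, verified against the SHA-1 stored in the CodeDirectory's
 * CSSLOT_ENTITLEMENTS special slot.  Returns 0 with *out_start == NULL when
 * the binary simply has no entitlements, and EBADEXEC when the blob and the
 * embedded hash disagree.
 */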
int
cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length)
{
uint8_t computed_hash[SHA1_RESULTLEN];
const CS_GenericBlob *entitlements;
const CS_CodeDirectory *code_dir;
struct cs_blob *csblob;
const uint8_t *embedded_hash;
SHA1_CTX context;
*out_start = NULL;
*out_length = 0;
if (NULL == p->p_textvp)
return EINVAL;
if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
return 0;
if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
return 0;
entitlements = cs_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
embedded_hash = cs_find_special_slot(code_dir, CSSLOT_ENTITLEMENTS);
	if (embedded_hash == NULL) {
		if (entitlements)
			return EBADEXEC;
		return 0;
	} else if (entitlements == NULL) {
		/* an all-zero special-slot hash means "no entitlements"; anything else is a mismatch */
		if (memcmp(embedded_hash, sha1_zero, SHA1_RESULTLEN) != 0)
			return EBADEXEC;
		return 0;
	}
SHA1Init(&context);
SHA1Update(&context, entitlements, ntohl(entitlements->length));
SHA1Final(computed_hash, &context);
if (memcmp(computed_hash, embedded_hash, SHA1_RESULTLEN) != 0)
return EBADEXEC;
*out_start = (void *)entitlements;
*out_length = ntohl(entitlements->length);
return 0;
}
const char *
cs_identity_get(proc_t p)
{
const CS_CodeDirectory *code_dir;
struct cs_blob *csblob;
if (NULL == p->p_textvp)
return NULL;
if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
return NULL;
if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
return NULL;
if (code_dir->identOffset == 0)
return NULL;
return ((const char *)code_dir) + ntohl(code_dir->identOffset);
}
int
cs_blob_get(proc_t p, void **out_start, size_t *out_length)
{
struct cs_blob *csblob;
*out_start = NULL;
*out_length = 0;
if (NULL == p->p_textvp)
return EINVAL;
if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
return 0;
*out_start = (void *)csblob->csb_mem_kaddr;
*out_length = csblob->csb_mem_size;
return 0;
}
uint8_t *
cs_get_cdhash(struct proc *p)
{
struct cs_blob *csblob;
if (NULL == p->p_textvp)
return NULL;
if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
return NULL;
return csblob->csb_sha1;
}
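/*
 * ubc_init: create the zone from which ubc_info structures are allocated.
 * Called once during BSD initialization, before any vnode gains UBC state.
 */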
__private_extern__ void
ubc_init(void)
{
int i;
i = (vm_size_t) sizeof (struct ubc_info);
ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
int
ubc_info_init(struct vnode *vp)
{
return(ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
return(ubc_info_init_internal(vp, 1, filesize));
}
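/*
 * ubc_info_init_internal: allocate (if needed) and initialize the ubc_info
 * for a vnode, set up its vnode pager and named memory object, and record the
 * file size either from the caller (withfsize) or from vnode_size().
 */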
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
register struct ubc_info *uip;
void * pager;
int error = 0;
kern_return_t kret;
memory_object_control_t control;
uip = vp->v_ubcinfo;
if (uip == UBC_INFO_NULL) {
uip = (struct ubc_info *) zalloc(ubc_info_zone);
bzero((char *)uip, sizeof(struct ubc_info));
uip->ui_vnode = vp;
uip->ui_flags = UI_INITED;
uip->ui_ucred = NOCRED;
}
assert(uip->ui_flags != UI_NONE);
assert(uip->ui_vnode == vp);
vp->v_ubcinfo = uip;
pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
assert(pager);
SET(uip->ui_flags, UI_HASPAGER);
uip->ui_pager = pager;
kret = memory_object_create_named(pager,
(memory_object_size_t)uip->ui_size, &control);
vnode_pager_deallocate(pager);
if (kret != KERN_SUCCESS)
panic("ubc_info_init: memory_object_create_named returned %d", kret);
assert(control);
uip->ui_control = control;
SET(uip->ui_flags, UI_HASOBJREF);
if (withfsize == 0) {
error = vnode_size(vp, &uip->ui_size, vfs_context_current());
if (error)
uip->ui_size = 0;
} else {
uip->ui_size = filesize;
}
vp->v_lflag |= VNAMED_UBC;
return (error);
}
static void
ubc_info_free(struct ubc_info *uip)
{
if (IS_VALID_CRED(uip->ui_ucred)) {
kauth_cred_unref(&uip->ui_ucred);
}
if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
memory_object_control_deallocate(uip->ui_control);
cluster_release(uip);
ubc_cs_free(uip);
zfree(ubc_info_zone, uip);
return;
}
void
ubc_info_deallocate(struct ubc_info *uip)
{
ubc_info_free(uip);
}
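/*
 * mach_to_bsd_errno: map a kern_return_t from the Mach/VM layer onto the
 * closest BSD errno; unknown codes fall back to EIO.
 */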
static errno_t mach_to_bsd_errno(kern_return_t mach_err)
{
switch (mach_err) {
case KERN_SUCCESS:
return 0;
case KERN_INVALID_ADDRESS:
case KERN_INVALID_ARGUMENT:
case KERN_NOT_IN_SET:
case KERN_INVALID_NAME:
case KERN_INVALID_TASK:
case KERN_INVALID_RIGHT:
case KERN_INVALID_VALUE:
case KERN_INVALID_CAPABILITY:
case KERN_INVALID_HOST:
case KERN_MEMORY_PRESENT:
case KERN_INVALID_PROCESSOR_SET:
case KERN_INVALID_POLICY:
case KERN_ALREADY_WAITING:
case KERN_DEFAULT_SET:
case KERN_EXCEPTION_PROTECTED:
case KERN_INVALID_LEDGER:
case KERN_INVALID_MEMORY_CONTROL:
case KERN_INVALID_SECURITY:
case KERN_NOT_DEPRESSED:
case KERN_LOCK_OWNED:
case KERN_LOCK_OWNED_SELF:
return EINVAL;
case KERN_PROTECTION_FAILURE:
case KERN_NOT_RECEIVER:
case KERN_NO_ACCESS:
case KERN_POLICY_STATIC:
return EACCES;
case KERN_NO_SPACE:
case KERN_RESOURCE_SHORTAGE:
case KERN_UREFS_OVERFLOW:
case KERN_INVALID_OBJECT:
return ENOMEM;
case KERN_FAILURE:
return EIO;
case KERN_MEMORY_FAILURE:
case KERN_POLICY_LIMIT:
case KERN_CODESIGN_ERROR:
return EPERM;
case KERN_MEMORY_ERROR:
return EBUSY;
case KERN_ALREADY_IN_SET:
case KERN_NAME_EXISTS:
case KERN_RIGHT_EXISTS:
return EEXIST;
case KERN_ABORTED:
return EINTR;
case KERN_TERMINATED:
case KERN_LOCK_SET_DESTROYED:
case KERN_LOCK_UNSTABLE:
case KERN_SEMAPHORE_DESTROYED:
return ENOENT;
case KERN_RPC_SERVER_TERMINATED:
return ECONNRESET;
case KERN_NOT_SUPPORTED:
return ENOTSUP;
case KERN_NODE_DOWN:
return ENETDOWN;
case KERN_NOT_WAITING:
return ENOENT;
case KERN_OPERATION_TIMED_OUT:
return ETIMEDOUT;
default:
return EIO;
}
}
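/*
 * ubc_setsize_ex: inform the memory object of a file size change.
 * On truncation, zero the tail of the new last page (so stale bytes cannot
 * reappear) and flush/invalidate any pages beyond the new EOF.  Growth only
 * updates ui_size and posts NOTE_EXTEND.
 */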
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
off_t osize;
off_t lastpg, olastpgend, lastoff;
struct ubc_info *uip;
memory_object_control_t control;
kern_return_t kret = KERN_SUCCESS;
if (nsize < (off_t)0)
return EINVAL;
if (!UBCINFOEXISTS(vp))
return ENOENT;
uip = vp->v_ubcinfo;
osize = uip->ui_size;
if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
return EAGAIN;
uip->ui_size = nsize;
if (nsize >= osize) {
if (nsize > osize) {
lock_vnode_and_post(vp, NOTE_EXTEND);
}
return 0;
}
lastpg = trunc_page_64(nsize);
olastpgend = round_page_64(osize);
control = uip->ui_control;
assert(control);
lastoff = (nsize & PAGE_MASK_64);
if (lastoff) {
upl_t upl;
upl_page_info_t *pl;
kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);
if (kret != KERN_SUCCESS)
panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
if (upl_valid_page(pl, 0))
cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
lastpg += PAGE_SIZE_64;
}
if (olastpgend > lastpg) {
int flags;
if (lastpg == 0)
flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
else
flags = MEMORY_OBJECT_DATA_FLUSH;
kret = memory_object_lock_request(control,
(memory_object_offset_t)lastpg,
(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
if (kret != KERN_SUCCESS)
printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
}
return mach_to_bsd_errno(kret);
}
int ubc_setsize(vnode_t vp, off_t nsize)
{
return ubc_setsize_ex(vp, nsize, 0) == 0;
}
off_t
ubc_getsize(struct vnode *vp)
{
if (!UBCINFOEXISTS(vp))
return ((off_t)0);
return (vp->v_ubcinfo->ui_size);
}
__private_extern__ int
ubc_umount(struct mount *mp)
{
vnode_iterate(mp, 0, ubc_umcallback, 0);
return(0);
}
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{
if (UBCINFOEXISTS(vp)) {
(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
}
return (VNODE_RETURNED);
}
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
if (UBCINFOEXISTS(vp))
return (vp->v_ubcinfo->ui_ucred);
return (NOCRED);
}
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
struct ubc_info *uip;
kauth_cred_t credp;
struct uthread *uthread = get_bsdthread_info(thread);
if (!UBCINFOEXISTS(vp))
return (1);
vnode_lock(vp);
uip = vp->v_ubcinfo;
credp = uip->ui_ucred;
if (!IS_VALID_CRED(credp)) {
if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
uip->ui_ucred = kauth_cred_proc_ref(p);
} else {
uip->ui_ucred = uthread->uu_ucred;
kauth_cred_ref(uip->ui_ucred);
}
}
vnode_unlock(vp);
return (0);
}
int
ubc_setcred(struct vnode *vp, proc_t p)
{
struct ubc_info *uip;
kauth_cred_t credp;
if ( !UBCINFOEXISTS(vp))
return (0);
vnode_lock(vp);
uip = vp->v_ubcinfo;
credp = uip->ui_ucred;
if (!IS_VALID_CRED(credp)) {
uip->ui_ucred = kauth_cred_proc_ref(p);
}
vnode_unlock(vp);
return (1);
}
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
if (UBCINFOEXISTS(vp))
return (vp->v_ubcinfo->ui_pager);
return (0);
}
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
if (UBCINFOEXISTS(vp))
return((vp->v_ubcinfo->ui_control));
return (MEMORY_OBJECT_CONTROL_NULL);
}
boolean_t
ubc_strict_uncached_IO(struct vnode *vp)
{
boolean_t result = FALSE;
if (UBCINFOEXISTS(vp)) {
result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
}
return result;
}
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
off_t file_offset = -1;
int error;
if (UBCINFOEXISTS(vp)) {
error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
if (error)
file_offset = -1;
}
return (file_offset);
}
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
daddr64_t blkno = -1;
int error = 0;
if (UBCINFOEXISTS(vp)) {
error = VNOP_OFFTOBLK(vp, offset, &blkno);
if (error)
blkno = -1;
}
return (blkno);
}
int
ubc_pages_resident(vnode_t vp)
{
kern_return_t kret;
boolean_t has_pages_resident;
if (!UBCINFOEXISTS(vp))
return (0);
kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
if (kret != KERN_SUCCESS)
return (0);
if (has_pages_resident == TRUE)
return (1);
return (0);
}
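/*
 * ubc_msync / ubc_msync_internal: push (UBC_PUSHDIRTY/UBC_PUSHALL) and/or
 * invalidate (UBC_INVALIDATE) the page-aligned range [beg_off, end_off) of a
 * vnode's memory object.  The internal variant returns 1 on success and 0 on
 * failure; ubc_msync() converts that into an errno, returning EINVAL when the
 * request could not even be issued.
 */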
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
int retval;
int io_errno = 0;
if (resid_off)
*resid_off = beg_off;
retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
if (retval == 0 && io_errno == 0)
return (EINVAL);
return (io_errno);
}
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
memory_object_size_t tsize;
kern_return_t kret;
int request_flags = 0;
int flush_flags = MEMORY_OBJECT_RETURN_NONE;
if ( !UBCINFOEXISTS(vp))
return (0);
if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
return (0);
if (end_off <= beg_off)
return (1);
if (flags & UBC_INVALIDATE)
request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
if (flags & UBC_SYNC)
request_flags |= MEMORY_OBJECT_IO_SYNC;
if (flags & UBC_PUSHDIRTY)
flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
if (flags & UBC_PUSHALL)
flush_flags = MEMORY_OBJECT_RETURN_ALL;
beg_off = trunc_page_64(beg_off);
end_off = round_page_64(end_off);
tsize = (memory_object_size_t)end_off - beg_off;
kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
beg_off, tsize,
(memory_object_offset_t *)resid_off,
io_errno, flush_flags, request_flags,
VM_PROT_NO_CHANGE);
return ((kret == KERN_SUCCESS) ? 1 : 0);
}
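/*
 * ubc_map: called when a vnode is memory-mapped.  Serializes against other
 * map/unmap activity with UI_MAPBUSY, calls VNOP_MMAP, records the mapped
 * (and possibly writable) state in ui_flags, and takes a vnode_ref() the
 * first time the vnode becomes mapped.
 */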
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
struct ubc_info *uip;
int error = 0;
int need_ref = 0;
int need_wakeup = 0;
if (UBCINFOEXISTS(vp)) {
vnode_lock(vp);
uip = vp->v_ubcinfo;
while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
SET(uip->ui_flags, UI_MAPWAITING);
(void) msleep(&uip->ui_flags, &vp->v_lock,
PRIBIO, "ubc_map", NULL);
}
SET(uip->ui_flags, UI_MAPBUSY);
vnode_unlock(vp);
error = VNOP_MMAP(vp, flags, vfs_context_current());
if (error != EPERM)
error = 0;
vnode_lock_spin(vp);
if (error == 0) {
if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
need_ref = 1;
SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
if (flags & PROT_WRITE) {
SET(uip->ui_flags, UI_MAPPEDWRITE);
}
}
CLR(uip->ui_flags, UI_MAPBUSY);
if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
CLR(uip->ui_flags, UI_MAPWAITING);
need_wakeup = 1;
}
vnode_unlock(vp);
if (need_wakeup)
wakeup(&uip->ui_flags);
if (need_ref)
vnode_ref(vp);
}
return (error);
}
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
memory_object_control_t control;
struct ubc_info *uip;
kern_return_t kret;
if (UBCINFOEXISTS(vp)) {
uip = vp->v_ubcinfo;
control = ubc_getobject(vp, UBC_HOLDOBJECT);
if (control != MEMORY_OBJECT_CONTROL_NULL) {
kret = memory_object_destroy(control, 0);
if (kret != KERN_SUCCESS)
panic("ubc_destroy_named: memory_object_destroy failed");
}
}
}
int
ubc_isinuse(struct vnode *vp, int busycount)
{
if ( !UBCINFOEXISTS(vp))
return (0);
return(ubc_isinuse_locked(vp, busycount, 0));
}
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
int retval = 0;
if (!locked)
vnode_lock_spin(vp);
if ((vp->v_usecount - vp->v_kusecount) > busycount)
retval = 1;
if (!locked)
vnode_unlock(vp);
return (retval);
}
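/*
 * ubc_unmap: called when the last mapping of a vnode goes away.  Calls
 * VNOP_MNOMAP, posts FSE_CONTENT_MODIFIED if the file had been mapped
 * writable, drops the vnode_ref() taken by ubc_map, and clears the
 * UI_ISMAPPED/UI_MAPPEDWRITE state.
 */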
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
struct ubc_info *uip;
int need_rele = 0;
int need_wakeup = 0;
if (vnode_getwithref(vp))
return;
if (UBCINFOEXISTS(vp)) {
bool want_fsevent = false;
vnode_lock(vp);
uip = vp->v_ubcinfo;
while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
SET(uip->ui_flags, UI_MAPWAITING);
(void) msleep(&uip->ui_flags, &vp->v_lock,
PRIBIO, "ubc_unmap", NULL);
}
SET(uip->ui_flags, UI_MAPBUSY);
if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
want_fsevent = true;
need_rele = 1;
}
vnode_unlock(vp);
if (need_rele) {
vfs_context_t ctx = vfs_context_current();
(void)VNOP_MNOMAP(vp, ctx);
#if CONFIG_FSE
if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
add_fsevent(FSE_CONTENT_MODIFIED, ctx,
FSE_ARG_VNODE, vp,
FSE_ARG_DONE);
}
#endif
vnode_rele(vp);
}
vnode_lock_spin(vp);
if (need_rele)
CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
CLR(uip->ui_flags, UI_MAPBUSY);
if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
CLR(uip->ui_flags, UI_MAPWAITING);
need_wakeup = 1;
}
vnode_unlock(vp);
if (need_wakeup)
wakeup(&uip->ui_flags);
}
vnode_put(vp);
}
kern_return_t
ubc_page_op(
struct vnode *vp,
off_t f_offset,
int ops,
ppnum_t *phys_entryp,
int *flagsp)
{
memory_object_control_t control;
control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
return (memory_object_page_op(control,
(memory_object_offset_t)f_offset,
ops,
phys_entryp,
flagsp));
}
kern_return_t
ubc_range_op(
struct vnode *vp,
off_t f_offset_beg,
off_t f_offset_end,
int ops,
int *range)
{
memory_object_control_t control;
control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
return (memory_object_range_op(control,
(memory_object_offset_t)f_offset_beg,
(memory_object_offset_t)f_offset_end,
ops,
range));
}
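/*
 * ubc_create_upl: build a universal page list (UPL) describing "bufsize"
 * bytes of the vnode's memory object starting at f_offset.  bufsize must be
 * page aligned and no larger than MAX_UPL_SIZE_BYTES; uplflags selects the
 * msync/pageout/pagein behavior.  A typical caller (compare ubc_setsize_ex
 * above) looks roughly like this -- sketch only, error handling elided:
 *
 *	upl_t upl;
 *	upl_page_info_t *pl;
 *	if (ubc_create_upl(vp, offset, PAGE_SIZE, &upl, &pl, UPL_SET_LITE)
 *	    == KERN_SUCCESS) {
 *		if (upl_valid_page(pl, 0))
 *			;	// inspect or zero the page via the cluster layer
 *		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
 *	}
 */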
kern_return_t
ubc_create_upl(
struct vnode *vp,
off_t f_offset,
int bufsize,
upl_t *uplp,
upl_page_info_t **plp,
int uplflags)
{
memory_object_control_t control;
kern_return_t kr;
if (plp != NULL)
*plp = NULL;
*uplp = NULL;
if (bufsize & 0xfff)
return KERN_INVALID_ARGUMENT;
if (bufsize > MAX_UPL_SIZE_BYTES)
return KERN_INVALID_ARGUMENT;
if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
if (uplflags & UPL_UBC_MSYNC) {
uplflags &= UPL_RET_ONLY_DIRTY;
uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
UPL_SET_INTERNAL | UPL_SET_LITE;
} else if (uplflags & UPL_UBC_PAGEOUT) {
uplflags &= UPL_RET_ONLY_DIRTY;
if (uplflags & UPL_RET_ONLY_DIRTY)
uplflags |= UPL_NOBLOCK;
uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
} else {
uplflags |= UPL_RET_ONLY_ABSENT |
UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
UPL_SET_INTERNAL | UPL_SET_LITE;
if (bufsize > PAGE_SIZE)
uplflags |= UPL_NOBLOCK;
}
} else {
uplflags &= ~UPL_FOR_PAGEOUT;
if (uplflags & UPL_WILL_BE_DUMPED) {
uplflags &= ~UPL_WILL_BE_DUMPED;
uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
} else
uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
}
control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
if (kr == KERN_SUCCESS && plp != NULL)
*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
return kr;
}
upl_size_t
ubc_upl_maxbufsize(
void)
{
return(MAX_UPL_SIZE_BYTES);
}
kern_return_t
ubc_upl_map(
upl_t upl,
vm_offset_t *dst_addr)
{
return (vm_upl_map(kernel_map, upl, dst_addr));
}
kern_return_t
ubc_upl_unmap(
upl_t upl)
{
return(vm_upl_unmap(kernel_map, upl));
}
kern_return_t
ubc_upl_commit(
upl_t upl)
{
upl_page_info_t *pl;
kern_return_t kr;
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
upl_deallocate(upl);
return kr;
}
kern_return_t
ubc_upl_commit_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int flags)
{
upl_page_info_t *pl;
boolean_t empty;
kern_return_t kr;
if (flags & UPL_COMMIT_FREE_ON_EMPTY)
flags |= UPL_COMMIT_NOTIFY_EMPTY;
if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
return KERN_INVALID_ARGUMENT;
}
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
kr = upl_commit_range(upl, offset, size, flags,
pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
upl_deallocate(upl);
return kr;
}
kern_return_t
ubc_upl_abort_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int abort_flags)
{
kern_return_t kr;
boolean_t empty = FALSE;
if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
upl_deallocate(upl);
return kr;
}
kern_return_t
ubc_upl_abort(
upl_t upl,
int abort_type)
{
kern_return_t kr;
kr = upl_abort(upl, abort_type);
upl_deallocate(upl);
return kr;
}
upl_page_info_t *
ubc_upl_pageinfo(
upl_t upl)
{
return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
int
UBCINFOEXISTS(const struct vnode * vp)
{
return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
void
ubc_upl_range_needed(
upl_t upl,
int index,
int count)
{
upl_range_needed(upl, index, count);
}
boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
return FALSE;
if (writable)
*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
return TRUE;
}
boolean_t ubc_is_mapped_writable(const struct vnode *vp)
{
boolean_t writable;
return ubc_is_mapped(vp, &writable) && writable;
}
#define CS_BLOB_PAGEABLE 0
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;
int cs_validation = 1;
#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
#endif
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
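/*
 * ubc_cs_blob_allocate / ubc_cs_blob_deallocate: wired storage for code
 * signature blobs.  With CS_BLOB_PAGEABLE the blob would live in pageable
 * kernel memory; as built here (CS_BLOB_PAGEABLE == 0) blobs come straight
 * from kalloc()/kfree().
 */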
kern_return_t
ubc_cs_blob_allocate(
vm_offset_t *blob_addr_p,
vm_size_t *blob_size_p)
{
kern_return_t kr;
#if CS_BLOB_PAGEABLE
*blob_size_p = round_page(*blob_size_p);
kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
#else
*blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
if (*blob_addr_p == 0) {
kr = KERN_NO_SPACE;
} else {
kr = KERN_SUCCESS;
}
#endif
return kr;
}
void
ubc_cs_blob_deallocate(
vm_offset_t blob_addr,
vm_size_t blob_size)
{
#if CS_BLOB_PAGEABLE
kmem_free(kernel_map, blob_addr, blob_size);
#else
kfree((void *) blob_addr, blob_size);
#endif
}
int
ubc_cs_sigpup_add(
struct vnode *vp,
vm_address_t address,
vm_size_t size)
{
kern_return_t kr;
struct ubc_info *uip;
struct cs_blob *blob;
memory_object_control_t control;
const CS_CodeDirectory *cd;
int error;
control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
if (memory_object_is_signed(control))
return 0;
blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
if (blob == NULL)
return ENOMEM;
blob->csb_cpu_type = CPU_TYPE_ANY;
blob->csb_base_offset = 0;
blob->csb_mem_size = size;
blob->csb_mem_offset = 0;
blob->csb_mem_handle = IPC_PORT_NULL;
blob->csb_mem_kaddr = address;
blob->csb_sigpup = 1;
blob->csb_platform_binary = 0;
blob->csb_teamid = NULL;
cd = findCodeDirectory(
(const CS_SuperBlob *) address,
(char *) address,
(char *) address + blob->csb_mem_size);
if (cd == NULL) {
error = EINVAL;
goto out;
}
blob->csb_flags = ntohl(cd->flags) | CS_VALID;
blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
const SC_Scatter *scatter = (const SC_Scatter*)
((const char*)cd + ntohl(cd->scatterOffset));
blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
} else {
blob->csb_start_offset = (blob->csb_end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
}
vnode_lock(vp);
if (! UBCINFOEXISTS(vp)) {
vnode_unlock(vp);
if (cs_debug)
printf("out ubc object\n");
error = ENOENT;
goto out;
}
uip = vp->v_ubcinfo;
if (uip->cs_blobs != NULL) {
if (cs_debug)
printf("sigpup: vnode already have CD ?\n");
vnode_unlock(vp);
error = EEXIST;
goto out;
}
blob->csb_next = uip->cs_blobs;
uip->cs_blobs = blob;
OSAddAtomic(+1, &cs_blob_count);
OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
kr = memory_object_signed(uip->ui_control, TRUE);
if (kr != KERN_SUCCESS) {
vnode_unlock(vp);
if (cs_debug)
printf("sigpup: not signable ?\n");
error = ENOENT;
goto out;
}
vnode_unlock(vp);
error = 0;
out:
if (error) {
if (cs_debug)
printf("sigpup: not signable ?\n");
if (blob) {
kfree(blob, sizeof (*blob));
blob = NULL;
}
}
return error;
}
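/*
 * ubc_cs_blob_add: attach a code-signature blob to a vnode.  The blob is
 * validated (cs_validate_csblob), hashed, checked by the MAC framework, and
 * compared against any blobs already attached: platform, team-id and ad-hoc
 * signatures may not be mixed, and an identical overlapping blob is treated
 * as a duplicate (EAGAIN, blob memory freed) rather than an error.  On
 * success the vnode's memory object is marked as signed.
 */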
int
ubc_cs_blob_add(
struct vnode *vp,
cpu_type_t cputype,
off_t base_offset,
vm_address_t addr,
vm_size_t size,
__unused int flags)
{
kern_return_t kr;
struct ubc_info *uip;
struct cs_blob *blob, *oblob;
int error;
ipc_port_t blob_handle;
memory_object_size_t blob_size;
const CS_CodeDirectory *cd;
off_t blob_start_offset, blob_end_offset;
SHA1_CTX sha1ctxt;
boolean_t record_mtime;
int is_platform_binary;
record_mtime = FALSE;
is_platform_binary = 0;
blob_handle = IPC_PORT_NULL;
blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
if (blob == NULL) {
return ENOMEM;
}
#if CS_BLOB_PAGEABLE
blob_size = (memory_object_size_t) size;
kr = mach_make_memory_entry_64(kernel_map,
&blob_size,
addr,
VM_PROT_READ,
&blob_handle,
IPC_PORT_NULL);
if (kr != KERN_SUCCESS) {
error = ENOMEM;
goto out;
}
if (memory_object_round_page(blob_size) !=
(memory_object_size_t) round_page(size)) {
printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
blob_size, (size_t)size);
panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
error = EINVAL;
goto out;
}
#else
blob_size = (memory_object_size_t) size;
blob_handle = IPC_PORT_NULL;
#endif
blob->csb_cpu_type = cputype;
blob->csb_sigpup = 0;
blob->csb_base_offset = base_offset;
blob->csb_mem_size = size;
blob->csb_mem_offset = 0;
blob->csb_mem_handle = blob_handle;
blob->csb_mem_kaddr = addr;
blob->csb_flags = 0;
blob->csb_platform_binary = 0;
blob->csb_teamid = NULL;
error = cs_validate_csblob((const uint8_t *)addr, size, &cd);
if (error) {
if (cs_debug)
printf("CODESIGNING: csblob invalid: %d\n", error);
blob->csb_flags = 0;
blob->csb_start_offset = 0;
blob->csb_end_offset = 0;
memset(blob->csb_sha1, 0, SHA1_RESULTLEN);
} else {
const unsigned char *sha1_base;
int sha1_size;
blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
const SC_Scatter *scatter = (const SC_Scatter*)
((const char*)cd + ntohl(cd->scatterOffset));
blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
} else {
blob->csb_start_offset = (blob->csb_end_offset -
(ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
}
sha1_base = (const unsigned char *) cd;
sha1_size = ntohl(cd->length);
SHA1Init(&sha1ctxt);
SHA1Update(&sha1ctxt, sha1_base, sha1_size);
SHA1Final(blob->csb_sha1, &sha1ctxt);
}
#if CONFIG_MACF
error = mac_vnode_check_signature(vp,
base_offset,
blob->csb_sha1,
(const void*)cd,
size, flags,
&is_platform_binary);
if (error) {
if (cs_debug)
printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
goto out;
}
if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !is_platform_binary) {
if (cs_debug)
printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
error = EPERM;
goto out;
}
#endif
if (is_platform_binary) {
if (cs_debug > 1)
printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
blob->csb_platform_binary = 1;
} else {
blob->csb_platform_binary = 0;
blob->csb_teamid = csblob_get_teamid(blob);
if (cs_debug > 1) {
if (blob->csb_teamid)
printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
else
printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
}
}
blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
if (blob_start_offset >= blob_end_offset ||
blob_start_offset < 0 ||
blob_end_offset <= 0) {
error = EINVAL;
goto out;
}
vnode_lock(vp);
if (! UBCINFOEXISTS(vp)) {
vnode_unlock(vp);
error = ENOENT;
goto out;
}
uip = vp->v_ubcinfo;
for (oblob = uip->cs_blobs;
oblob != NULL;
oblob = oblob->csb_next) {
off_t oblob_start_offset, oblob_end_offset;
		if (blob->csb_platform_binary) {
			/* platform binaries may only be combined with other platform blobs */
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) {
			/* team-id signed blobs must share the same team-id */
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else {
			/* ad-hoc blobs may not mix with platform or team-id blobs */
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
oblob_start_offset = (oblob->csb_base_offset +
oblob->csb_start_offset);
oblob_end_offset = (oblob->csb_base_offset +
oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* ranges don't overlap: no conflict with this blob */
		} else {
if (blob_start_offset == oblob_start_offset &&
blob_end_offset == oblob_end_offset &&
blob->csb_mem_size == oblob->csb_mem_size &&
blob->csb_flags == oblob->csb_flags &&
(blob->csb_cpu_type == CPU_TYPE_ANY ||
oblob->csb_cpu_type == CPU_TYPE_ANY ||
blob->csb_cpu_type == oblob->csb_cpu_type) &&
!bcmp(blob->csb_sha1,
oblob->csb_sha1,
SHA1_RESULTLEN)) {
if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
oblob->csb_cpu_type = cputype;
}
vnode_unlock(vp);
error = EAGAIN;
goto out;
} else {
vnode_unlock(vp);
error = EALREADY;
goto out;
}
}
}
kr = memory_object_signed(uip->ui_control, TRUE);
if (kr != KERN_SUCCESS) {
vnode_unlock(vp);
error = ENOENT;
goto out;
}
if (uip->cs_blobs == NULL) {
record_mtime = TRUE;
}
uip->cs_add_gen = cs_blob_generation_count;
blob->csb_next = uip->cs_blobs;
uip->cs_blobs = blob;
OSAddAtomic(+1, &cs_blob_count);
if (cs_blob_count > cs_blob_count_peak) {
cs_blob_count_peak = cs_blob_count;
}
OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
if ((SInt32) cs_blob_size > cs_blob_size_peak) {
cs_blob_size_peak = (SInt32) cs_blob_size;
}
if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
cs_blob_size_max = (UInt32) blob->csb_mem_size;
}
if (cs_debug > 1) {
proc_t p;
const char *name = vnode_getname_printable(vp);
p = current_proc();
printf("CODE SIGNING: proc %d(%s) "
"loaded %s signatures for file (%s) "
"range 0x%llx:0x%llx flags 0x%x\n",
p->p_pid, p->p_comm,
blob->csb_cpu_type == -1 ? "detached" : "embedded",
name,
blob->csb_base_offset + blob->csb_start_offset,
blob->csb_base_offset + blob->csb_end_offset,
blob->csb_flags);
vnode_putname_printable(name);
}
vnode_unlock(vp);
if (record_mtime) {
vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
}
error = 0;
out:
if (error) {
if (cs_debug)
printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
if (blob) {
kfree(blob, sizeof (*blob));
blob = NULL;
}
if (blob_handle != IPC_PORT_NULL) {
mach_memory_entry_port_release(blob_handle);
blob_handle = IPC_PORT_NULL;
}
}
if (error == EAGAIN) {
error = 0;
ubc_cs_blob_deallocate(addr, size);
}
return error;
}
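/*
 * ubc_cs_blob_get: find the blob attached to a vnode that matches the given
 * CPU type (when cputype != -1) or whose [start, end) range covers the given
 * file offset (when offset != -1).  Returns NULL if the vnode has no UBC
 * info or no matching blob.
 */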
struct cs_blob *
ubc_cs_blob_get(
struct vnode *vp,
cpu_type_t cputype,
off_t offset)
{
struct ubc_info *uip;
struct cs_blob *blob;
off_t offset_in_blob;
vnode_lock_spin(vp);
if (! UBCINFOEXISTS(vp)) {
blob = NULL;
goto out;
}
uip = vp->v_ubcinfo;
for (blob = uip->cs_blobs;
blob != NULL;
blob = blob->csb_next) {
if (cputype != -1 && blob->csb_cpu_type == cputype) {
break;
}
if (offset != -1) {
offset_in_blob = offset - blob->csb_base_offset;
if (offset_in_blob >= blob->csb_start_offset &&
offset_in_blob < blob->csb_end_offset) {
break;
}
}
}
if (cs_debug && blob != NULL && blob->csb_sigpup) {
printf("found sig pup blob\n");
}
out:
vnode_unlock(vp);
return blob;
}
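/*
 * ubc_cs_free: release every code-signature blob attached to a ubc_info,
 * freeing the blob storage (except for sigpup blobs, whose memory is not
 * owned here), dropping any memory-entry port, and updating the global
 * blob counters.
 */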
static void
ubc_cs_free(
struct ubc_info *uip)
{
struct cs_blob *blob, *next_blob;
for (blob = uip->cs_blobs;
blob != NULL;
blob = next_blob) {
next_blob = blob->csb_next;
if (blob->csb_mem_kaddr != 0 && !blob->csb_sigpup) {
ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
blob->csb_mem_size);
blob->csb_mem_kaddr = 0;
}
if (blob->csb_mem_handle != IPC_PORT_NULL) {
mach_memory_entry_port_release(blob->csb_mem_handle);
}
blob->csb_mem_handle = IPC_PORT_NULL;
OSAddAtomic(-1, &cs_blob_count);
OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
kfree(blob, sizeof (*blob));
}
#if CHECK_CS_VALIDATION_BITMAP
ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
uip->cs_blobs = NULL;
}
int
ubc_cs_generation_check(
struct vnode *vp)
{
int retval = ENEEDAUTH;
vnode_lock_spin(vp);
if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
retval = 0;
}
vnode_unlock(vp);
return retval;
}
int
ubc_cs_blob_revalidate(
struct vnode *vp,
struct cs_blob *blob,
__unused int flags
)
{
int error = 0;
#if CONFIG_MACF
int is_platform_binary = 0;
#endif
const CS_CodeDirectory *cd = NULL;
assert(vp != NULL);
assert(blob != NULL);
error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd);
if (error) {
if (cs_debug) {
printf("CODESIGNING: csblob invalid: %d\n", error);
}
goto out;
}
#if CONFIG_MACF
error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_sha1, (const void*)cd, blob->csb_cpu_type, flags, &is_platform_binary);
if (cs_debug && error) {
printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
}
#endif
vnode_lock_spin(vp);
if (UBCINFOEXISTS(vp)) {
if (error == 0)
vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
else
vp->v_ubcinfo->cs_add_gen = 0;
}
vnode_unlock(vp);
out:
return error;
}
void
cs_blob_reset_cache(void)
{
	/* incrementing by 2 keeps the odd generation count from ever reaching 0 */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}
struct cs_blob *
ubc_get_cs_blobs(
struct vnode *vp)
{
struct ubc_info *uip;
struct cs_blob *blobs;
if (! UBCINFOEXISTS(vp)) {
blobs = NULL;
goto out;
}
uip = vp->v_ubcinfo;
blobs = uip->cs_blobs;
out:
return blobs;
}
void
ubc_get_cs_mtime(
struct vnode *vp,
struct timespec *cs_mtime)
{
struct ubc_info *uip;
if (! UBCINFOEXISTS(vp)) {
cs_mtime->tv_sec = 0;
cs_mtime->tv_nsec = 0;
return;
}
uip = vp->v_ubcinfo;
cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
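/*
 * cs_validate_page: validate one 4K page of a signed file.  Find the blob
 * covering the page, locate its CodeDirectory, compute the SHA-1 of the page
 * contents (truncated at codeLimit) and compare it against the stored slot
 * hash.  *tainted is set with CS_VALIDATE_TAINTED on a mismatch and
 * CS_VALIDATE_NX for the partial page beyond codeLimit; the return value
 * indicates whether a hash was found and checked at all.
 */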
boolean_t
cs_validate_page(
void *_blobs,
memory_object_t pager,
memory_object_offset_t page_offset,
const void *data,
unsigned *tainted)
{
SHA1_CTX sha1ctxt;
unsigned char actual_hash[SHA1_RESULTLEN];
unsigned char expected_hash[SHA1_RESULTLEN];
boolean_t found_hash;
struct cs_blob *blobs, *blob;
const CS_CodeDirectory *cd;
const CS_SuperBlob *embedded;
const unsigned char *hash;
boolean_t validated;
off_t offset;
size_t size;
off_t codeLimit = 0;
char *lower_bound, *upper_bound;
vm_offset_t kaddr, blob_addr;
vm_size_t ksize;
kern_return_t kr;
offset = page_offset;
found_hash = FALSE;
blobs = (struct cs_blob *) _blobs;
for (blob = blobs;
blob != NULL;
blob = blob->csb_next) {
offset = page_offset - blob->csb_base_offset;
if (offset < blob->csb_start_offset ||
offset >= blob->csb_end_offset) {
continue;
}
kaddr = blob->csb_mem_kaddr;
if (kaddr == 0) {
ksize = (vm_size_t) (blob->csb_mem_size +
blob->csb_mem_offset);
kr = vm_map(kernel_map,
&kaddr,
ksize,
0,
VM_FLAGS_ANYWHERE,
blob->csb_mem_handle,
0,
TRUE,
VM_PROT_READ,
VM_PROT_READ,
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
printf("cs_validate_page: failed to map blob, "
"size=0x%lx kr=0x%x\n",
(size_t)blob->csb_mem_size, kr);
break;
}
}
if (blob->csb_sigpup && cs_debug)
printf("checking for a sigpup CD\n");
blob_addr = kaddr + blob->csb_mem_offset;
lower_bound = CAST_DOWN(char *, blob_addr);
upper_bound = lower_bound + blob->csb_mem_size;
embedded = (const CS_SuperBlob *) blob_addr;
cd = findCodeDirectory(embedded, lower_bound, upper_bound);
if (cd != NULL) {
if (cd->pageSize != PAGE_SHIFT_4K ||
cd->hashType != CS_HASHTYPE_SHA1 ||
cd->hashSize != SHA1_RESULTLEN) {
if (blob->csb_sigpup && cs_debug)
printf("page foo bogus sigpup CD\n");
continue;
}
offset = page_offset - blob->csb_base_offset;
if (offset < blob->csb_start_offset ||
offset >= blob->csb_end_offset) {
if (blob->csb_sigpup && cs_debug)
printf("OOB sigpup CD\n");
continue;
}
codeLimit = ntohl(cd->codeLimit);
if (blob->csb_sigpup && cs_debug)
printf("sigpup codesize %d\n", (int)codeLimit);
hash = hashes(cd, (unsigned)(offset>>PAGE_SHIFT_4K),
lower_bound, upper_bound);
if (hash != NULL) {
bcopy(hash, expected_hash,
sizeof (expected_hash));
found_hash = TRUE;
if (blob->csb_sigpup && cs_debug)
printf("sigpup hash\n");
}
break;
} else {
if (blob->csb_sigpup && cs_debug)
printf("sig pup had no valid CD\n");
}
}
if (found_hash == FALSE) {
cs_validate_page_no_hash++;
if (cs_debug > 1) {
printf("CODE SIGNING: cs_validate_page: "
"mobj %p off 0x%llx: no hash to validate !?\n",
pager, page_offset);
}
validated = FALSE;
*tainted = 0;
} else {
*tainted = 0;
size = PAGE_SIZE_4K;
const uint32_t *asha1, *esha1;
if ((off_t)(offset + size) > codeLimit) {
assert(offset < codeLimit);
size = (size_t) (codeLimit & PAGE_MASK_4K);
*tainted |= CS_VALIDATE_NX;
}
SHA1Init(&sha1ctxt);
SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
SHA1Final(actual_hash, &sha1ctxt);
asha1 = (const uint32_t *) actual_hash;
esha1 = (const uint32_t *) expected_hash;
if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
if (cs_debug) {
printf("CODE SIGNING: cs_validate_page: "
"mobj %p off 0x%llx size 0x%lx: "
"actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
"expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
pager, page_offset, size,
asha1[0], asha1[1], asha1[2],
asha1[3], asha1[4],
esha1[0], esha1[1], esha1[2],
esha1[3], esha1[4]);
}
cs_validate_page_bad_hash++;
*tainted |= CS_VALIDATE_TAINTED;
} else {
if (cs_debug > 10) {
printf("CODE SIGNING: cs_validate_page: "
"mobj %p off 0x%llx size 0x%lx: "
"SHA1 OK\n",
pager, page_offset, size);
}
}
validated = TRUE;
}
return validated;
}
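/*
 * ubc_cs_getcdhash: copy out the SHA-1 hash of the CodeDirectory (cdhash)
 * of the blob covering the given file offset.  Returns EBADEXEC when no
 * blob covers that offset.
 */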
int
ubc_cs_getcdhash(
vnode_t vp,
off_t offset,
unsigned char *cdhash)
{
struct cs_blob *blobs, *blob;
off_t rel_offset;
int ret;
vnode_lock(vp);
blobs = ubc_get_cs_blobs(vp);
for (blob = blobs;
blob != NULL;
blob = blob->csb_next) {
rel_offset = offset - blob->csb_base_offset;
if (rel_offset >= blob->csb_start_offset &&
rel_offset < blob->csb_end_offset) {
break;
}
}
if (blob == NULL) {
ret = EBADEXEC;
} else {
bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
ret = 0;
}
vnode_unlock(vp);
return ret;
}
#if CHECK_CS_VALIDATION_BITMAP
#define stob(s) ((atop_64((s)) + 07) >> 3)
extern boolean_t root_fs_upgrade_try;
#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
kern_return_t
ubc_cs_validation_bitmap_allocate(
vnode_t vp)
{
kern_return_t kr = KERN_SUCCESS;
struct ubc_info *uip;
char *target_bitmap;
vm_object_size_t bitmap_size;
if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
kr = KERN_INVALID_ARGUMENT;
} else {
uip = vp->v_ubcinfo;
if ( uip->cs_valid_bitmap == NULL ) {
bitmap_size = stob(uip->ui_size);
target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
if (target_bitmap == 0) {
kr = KERN_NO_SPACE;
} else {
kr = KERN_SUCCESS;
}
if( kr == KERN_SUCCESS ) {
memset( target_bitmap, 0, (size_t)bitmap_size);
uip->cs_valid_bitmap = (void*)target_bitmap;
uip->cs_valid_bitmap_size = bitmap_size;
}
}
}
return kr;
}
kern_return_t
ubc_cs_check_validation_bitmap (
vnode_t vp,
memory_object_offset_t offset,
int optype)
{
kern_return_t kr = KERN_SUCCESS;
if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
kr = KERN_INVALID_ARGUMENT;
} else {
struct ubc_info *uip = vp->v_ubcinfo;
char *target_bitmap = uip->cs_valid_bitmap;
if ( target_bitmap == NULL ) {
kr = KERN_INVALID_ARGUMENT;
} else {
uint64_t bit, byte;
bit = atop_64( offset );
byte = bit >> 3;
if ( byte >= uip->cs_valid_bitmap_size ) {
kr = KERN_INVALID_ARGUMENT;
} else {
if (optype == CS_BITMAP_SET) {
target_bitmap[byte] |= (1 << (bit & 07));
kr = KERN_SUCCESS;
} else if (optype == CS_BITMAP_CLEAR) {
target_bitmap[byte] &= ~(1 << (bit & 07));
kr = KERN_SUCCESS;
} else if (optype == CS_BITMAP_CHECK) {
if ( target_bitmap[byte] & (1 << (bit & 07))) {
kr = KERN_SUCCESS;
} else {
kr = KERN_FAILURE;
}
}
}
}
}
return kr;
}
void
ubc_cs_validation_bitmap_deallocate(
vnode_t vp)
{
struct ubc_info *uip;
void *target_bitmap;
vm_object_size_t bitmap_size;
if ( UBCINFOEXISTS(vp)) {
uip = vp->v_ubcinfo;
if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
bitmap_size = uip->cs_valid_bitmap_size;
kfree( target_bitmap, (vm_size_t) bitmap_size );
uip->cs_valid_bitmap = NULL;
}
}
}
#else
kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
return KERN_INVALID_ARGUMENT;
}
kern_return_t ubc_cs_check_validation_bitmap(
__unused struct vnode *vp,
__unused memory_object_offset_t offset,
__unused int optype){
return KERN_INVALID_ARGUMENT;
}
void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
return;
}
#endif