#include <string.h>
#include <mach/machine.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/kmod.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#include <sys/types.h>
#if KERNEL
#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLib.h>
#include <libkern/OSKextLibPrivate.h>
#include <mach/vm_param.h>
#include <mach-o/fat.h>
#else
#include <architecture/byte_order.h>
#include <mach/mach_init.h>
#include <mach-o/arch.h>
#include <mach-o/swap.h>
#endif
#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>
#include "kxld_dict.h"
#include "kxld_kext.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_state.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_uuid.h"
#include "kxld_vtable.h"
struct symtab_command;
/* Classifies what role a Mach-O file plays in a link operation. */
enum kxld_link_type {
KXLD_LINK_KERNEL,       /* the kernel image itself */
KXLD_LINK_PSEUDO_KEXT,  /* symbol-only file with no loadable segments */
KXLD_LINK_KEXT,         /* a real loadable kext */
KXLD_LINK_UNKNOWN       /* not yet determined by init */
};
typedef enum kxld_link_type KXLDLinkType;
/*
 * Per-kext linker state.  Holds the raw Mach-O file, parsed segment/section/
 * symbol data, vtable patching state, and the link-time addresses.
 */
struct kxld_kext {
u_char *file;                  /* raw Mach-O image (points into caller's buffer or a fat slice) */
u_long size;                   /* size of *file in bytes */
const char *name;              /* kext identifier used for logging (not owned) */
uint32_t filetype;             /* MH_OBJECT, MH_KEXT_BUNDLE, or MH_EXECUTE */
KXLDArray segs;                /* array of KXLDSeg */
KXLDArray sects;               /* array of KXLDSect */
KXLDArray vtables;             /* array of KXLDVTable */
KXLDArray extrelocs;           /* external relocations (final-linked images only) */
KXLDArray locrelocs;           /* local relocations (final-linked images only) */
KXLDDict vtable_index;         /* vtable name -> KXLDVTable * */
KXLDRelocator relocator;       /* arch-specific relocation engine */
KXLDuuid uuid;                 /* LC_UUID payload, if present */
KXLDSymtab *symtab;            /* heap-allocated symbol table (owned; freed in deinit) */
kxld_addr_t link_addr;         /* address the kext is linked at */
kmod_info_t *kmod_info;        /* pointer to kmod info within the image */
kxld_addr_t kmod_link_addr;    /* linked address of the kmod_info structure */
cpu_type_t cputype;            /* target CPU type */
cpu_subtype_t cpusubtype;      /* target CPU subtype */
KXLDLinkType link_type;        /* see enum kxld_link_type */
KXLDFlags flags;               /* caller-supplied link flags */
boolean_t is_final_image;      /* TRUE for already-linked (MH_EXECUTE/MH_KEXT_BUNDLE) images */
boolean_t got_is_created;      /* TRUE once a GOT section has been synthesized */
struct dysymtab_command *dysymtab_hdr;  /* LC_DYSYMTAB header within *file */
#if KXLD_USER_OR_OBJECT
KXLDArray *section_order;      /* kernel's section ordering, used to lay out MH_OBJECT kexts */
#endif
#if !KERNEL
enum NXByteOrder host_order;   /* byte order of the linking host */
enum NXByteOrder target_order; /* byte order of the target arch */
#endif
};
/* Internal prototypes.  Grouped by feature; each group is conditionally
 * compiled to match the KXLD_USER_OR_* configuration macros. */

/* Architecture selection and Mach-O parsing helpers. */
static kern_return_t get_target_machine_info(KXLDKext *kext, cpu_type_t cputype,
cpu_subtype_t cpusubtype);
static kern_return_t get_file_for_arch(KXLDKext *kext, u_char *file, u_long size);
static u_long get_macho_header_size(const KXLDKext *kext);
static u_long get_macho_data_size(const KXLDKext *kext);
static kern_return_t export_macho_header(const KXLDKext *kext, u_char *buf,
u_int ncmds, u_long *header_offset, u_long header_size);
/* Filetype-specific initializers. */
static kern_return_t init_from_execute(KXLDKext *kext);
static kern_return_t init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
struct symtab_command **symtab_hdr_out);
static boolean_t target_supports_protected_segments(const KXLDKext *kext)
__attribute__((pure));
#if KXLD_USER_OR_OBJECT
static boolean_t target_supports_object(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_object(KXLDKext *kext);
static kern_return_t process_relocs_from_sections(KXLDKext *kext);
#endif
#if KXLD_USER_OR_BUNDLE
static boolean_t target_supports_bundle(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_bundle(KXLDKext *kext);
static kern_return_t process_relocs_from_tables(KXLDKext *kext);
static kern_return_t process_symbol_pointers(KXLDKext *kext);
static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit);
#endif
/* C++ vtable discovery and patching. */
static kern_return_t get_metaclass_symbol_from_super_meta_class_pointer_symbol(
KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym, KXLDSym **meta_class);
static kern_return_t resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
KXLDDict *obsolete_symbols);
static kern_return_t patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
KXLDDict *defined_symbols);
static kern_return_t validate_symbols(KXLDKext *kext);
static kern_return_t populate_kmod_info(KXLDKext *kext);
static kern_return_t copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables);
static kern_return_t create_vtables(KXLDKext *kext);
static void restrict_private_symbols(KXLDKext *kext);
/* GOT and common-symbol support. */
#if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
static kern_return_t add_section(KXLDKext *kext, KXLDSect **sect);
#endif
#if KXLD_USER_OR_GOT
static boolean_t target_has_got(const KXLDKext *kext) __attribute__((pure));
static kern_return_t create_got(KXLDKext *kext);
static kern_return_t populate_got(KXLDKext *kext);
#endif
static boolean_t target_supports_common(const KXLDKext *kext) __attribute((pure));
#if KXLD_USER_OR_COMMON
static kern_return_t resolve_common_symbols(KXLDKext *kext);
#endif
static boolean_t target_supports_strict_patching(KXLDKext *kext)
__attribute__((pure));
/* Word-size-specific Mach-O header helpers. */
#if KXLD_USER_OR_ILP32
static u_long get_macho_cmd_data_32(u_char *file, u_long offset,
u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_32(const KXLDKext *kext, u_char *buf,
u_int ncmds, u_long *header_offset, u_long header_size);
#endif
#if KXLD_USER_OR_LP64
static u_long get_macho_cmd_data_64(u_char *file, u_long offset,
u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_64(const KXLDKext *kext, u_char *buf,
u_int ncmds, u_long *header_offset, u_long header_size);
#endif
/* Returns the size of the opaque KXLDKext structure so callers can
 * allocate storage for it without seeing its definition. */
size_t
kxld_kext_sizeof(void)
{
return sizeof(KXLDKext);
}
/*
 * Initializes a KXLDKext from a raw Mach-O (or, in user space, fat) file.
 * Determines the target architecture, parses the image according to its
 * Mach-O filetype, applies segment protections, and builds vtables.
 * Returns KERN_SUCCESS or an error code; on failure the kext may be
 * partially initialized and should be cleaned up by the caller.
 */
kern_return_t
kxld_kext_init(KXLDKext *kext, u_char *file, u_long size,
const char *name, KXLDFlags flags, boolean_t is_kernel,
KXLDArray *section_order __unused,
cpu_type_t cputype, cpu_subtype_t cpusubtype)
{
kern_return_t rval = KERN_FAILURE;
KXLDSeg *seg = NULL;
u_int i = 0;
check(kext);
check(file);
check(size);
kext->name = name;
kext->flags = flags;
#if KXLD_USER_OR_OBJECT
kext->section_order = section_order;
#endif
/* Pick the target arch, then locate and validate the matching slice. */
rval = get_target_machine_info(kext, cputype, cpusubtype);
require_noerr(rval, finish);
rval = get_file_for_arch(kext, file, size);
require_noerr(rval, finish);
rval = kxld_relocator_init(&kext->relocator, kext->cputype,
kext->cpusubtype, kxld_kext_target_needs_swap(kext));
require_noerr(rval, finish);
/* Lazily allocate the symbol table container on first use. */
if (!kext->symtab) {
kext->symtab = kxld_alloc(kxld_symtab_sizeof());
require_action(kext->symtab, finish, rval=KERN_RESOURCE_SHORTAGE);
bzero(kext->symtab, kxld_symtab_sizeof());
}
if (is_kernel) {
kext->link_type = KXLD_LINK_KERNEL;
} else {
kext->link_type = KXLD_LINK_UNKNOWN;
}
/* Read the filetype from the (32- or 64-bit) Mach-O header. */
if (kxld_kext_is_32_bit(kext)) {
struct mach_header *mach_hdr = (struct mach_header *) kext->file;
kext->filetype = mach_hdr->filetype;
} else {
struct mach_header_64 *mach_hdr = (struct mach_header_64 *) kext->file;
kext->filetype = mach_hdr->filetype;
}
/* Dispatch to the filetype-specific initializer. */
switch (kext->filetype) {
#if KXLD_USER_OR_OBJECT
case MH_OBJECT:
rval = init_from_object(kext);
require_noerr(rval, finish);
break;
#endif
#if KXLD_USER_OR_BUNDLE
case MH_KEXT_BUNDLE:
rval = init_from_bundle(kext);
require_noerr(rval, finish);
break;
#endif
case MH_EXECUTE:
rval = init_from_execute(kext);
require_noerr(rval, finish);
break;
default:
rval = KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr,
kKxldLogFiletypeNotSupported, kext->filetype);
goto finish;
}
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
kxld_seg_set_vm_protections(seg, target_supports_protected_segments(kext));
}
switch (kext->link_type) {
case KXLD_LINK_KEXT:
(void) restrict_private_symbols(kext);
/* fall through: true kexts also need vtables built */
case KXLD_LINK_KERNEL:
rval = create_vtables(kext);
require_noerr(rval, finish);
break;
default:
break;
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/*
 * Records the target cputype/cpusubtype and byte orders for this link.
 *
 * In-kernel (KERNEL) builds always target the architecture the kernel was
 * compiled for.  User-space builds honor an explicit cputype argument, or
 * fall back to the host architecture when cputype is 0.
 *
 * Returns KERN_SUCCESS, or KERN_NOT_SUPPORTED for an unknown architecture.
 *
 * Note: the forward declaration of this function declares it static; the
 * definition previously omitted the keyword.  Adding it here keeps the
 * declaration and definition consistent (linkage was already internal).
 */
static kern_return_t
get_target_machine_info(KXLDKext *kext, cpu_type_t cputype __unused,
    cpu_subtype_t cpusubtype __unused)
{
#if KERNEL
    /* The kernel-side linker only supports linking for the running arch. */
    check(kext);

#if defined(__i386__)
    kext->cputype = CPU_TYPE_I386;
    kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
    return KERN_SUCCESS;
#elif defined(__ppc__)
    kext->cputype = CPU_TYPE_POWERPC;
    kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
    return KERN_SUCCESS;
#elif defined(__x86_64__)
    kext->cputype = CPU_TYPE_X86_64;
    kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
    return KERN_SUCCESS;
#else
    /* NOTE(review): this branch only compiles on unsupported architectures;
     * whether _mh_execute_header is a pointer here should be confirmed. */
    kxld_log(kKxldLogLinking, kKxldLogErr,
        kKxldLogArchNotSupported, _mh_execute_header->cputype);
    return KERN_NOT_SUPPORTED;
#endif
#else /* !KERNEL */
    kern_return_t rval = KERN_FAILURE;
    const NXArchInfo *host_arch = NULL;

    check(kext);

    host_arch = NXGetLocalArchInfo();
    require_action(host_arch, finish, rval=KERN_FAILURE);

    kext->host_order = host_arch->byteorder;

    /* Use the caller's requested arch if given; otherwise fall back to
     * the host arch and normalize its subtype to the *_ALL variant. */
    if (cputype) {
        kext->cputype = cputype;
        kext->cpusubtype = cpusubtype;
    } else {
        kext->cputype = host_arch->cputype;
        kext->target_order = kext->host_order;
        switch (kext->cputype) {
        case CPU_TYPE_I386:
            kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
            break;
        case CPU_TYPE_POWERPC:
            kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
            break;
        case CPU_TYPE_X86_64:
            kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
            break;
        case CPU_TYPE_ARM:
            kext->cpusubtype = CPU_SUBTYPE_ARM_ALL;
            break;
        default:
            kext->cpusubtype = 0;
        }
    }

    /* Derive the target byte order from the cputype. */
    switch(kext->cputype) {
    case CPU_TYPE_ARM:
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
        kext->target_order = NX_LittleEndian;
        break;
    case CPU_TYPE_POWERPC:
        kext->target_order = NX_BigEndian;
        break;
    default:
        rval = KERN_NOT_SUPPORTED;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogArchNotSupported, kext->cputype);
        goto finish;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
#endif
}
/*
 * Locates the Mach-O slice for the target architecture within the input
 * buffer (handling user-space fat files), byte-swaps and validates the
 * Mach-O, and verifies that its cputype matches the target.  On success,
 * kext->file/kext->size describe the selected slice.
 */
static kern_return_t
get_file_for_arch(KXLDKext *kext, u_char *file, u_long size)
{
kern_return_t rval = KERN_FAILURE;
struct mach_header *mach_hdr = NULL;
#if !KERNEL
struct fat_header *fat = (struct fat_header *) file;
/* fat_arch entries immediately follow the fat header. */
struct fat_arch *archs = (struct fat_arch *) &fat[1];
boolean_t swap = FALSE;
#endif
check(kext);
check(file);
check(size);
kext->file = file;
kext->size = size;
#if !KERNEL
require_action(size >= sizeof(*fat), finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
/* Fat headers are big-endian; swap if the host read them reversed. */
if (fat->magic == FAT_CIGAM) {
(void) swap_fat_header(fat, kext->host_order);
swap = TRUE;
}
if (fat->magic == FAT_MAGIC) {
struct fat_arch *arch = NULL;
/* NOTE(review): nfat_arch * sizeof(*archs) could overflow u_long on
 * hostile input — TODO confirm upstream validation of nfat_arch. */
require_action(size >= (sizeof(*fat) + (fat->nfat_arch * sizeof(*archs))),
finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
if (swap) {
(void) swap_fat_arch(archs, fat->nfat_arch, kext->host_order);
}
arch = NXFindBestFatArch(kext->cputype, kext->cpusubtype, archs,
fat->nfat_arch);
require_action(arch, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound));
/* NOTE(review): arch->offset + arch->size may overflow — TODO confirm. */
require_action(size >= arch->offset + arch->size, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
kext->file = file + arch->offset;
kext->size = arch->size;
}
#endif
/* Validate the thin Mach-O and (user space) swap it to host order. */
if (kxld_kext_is_32_bit(kext)) {
rval = validate_and_swap_macho_32(kext->file, kext->size
#if !KERNEL
, kext->host_order
#endif
);
} else {
rval = validate_and_swap_macho_64(kext->file, kext->size
#if !KERNEL
, kext->host_order
#endif
);
}
require_noerr(rval, finish);
mach_hdr = (struct mach_header *) kext->file;
/* NOTE(review): a cputype mismatch is logged as "truncated Mach-O";
 * an arch-mismatch log constant may be more accurate — verify intent. */
require_action(kext->cputype == mach_hdr->cputype, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Returns TRUE if the kext's target cputype is a 32-bit architecture. */
boolean_t
kxld_kext_is_32_bit(const KXLDKext *kext)
{
check(kext);
return kxld_is_32_bit(kext->cputype);
}
/* Reports the kext's target CPU type and subtype through the out params. */
void
kxld_kext_get_cputype(const KXLDKext *kext, cpu_type_t *cputype,
    cpu_subtype_t *cpusubtype)
{
    check(kext);
    check(cputype);
    check(cpusubtype);

    *cpusubtype = kext->cpusubtype;
    *cputype = kext->cputype;
}
/* Succeeds iff the requested cputype matches the kext's target cputype.
 * The subtype is intentionally not checked. */
kern_return_t
kxld_kext_validate_cputype(const KXLDKext *kext, cpu_type_t cputype,
    cpu_subtype_t cpusubtype __unused)
{
    return (kext->cputype == cputype) ? KERN_SUCCESS : KERN_FAILURE;
}
static boolean_t
target_supports_protected_segments(const KXLDKext *kext)
{
return (kext->is_final_image &&
kext->cputype == CPU_TYPE_X86_64);
}
#if KXLD_USER_OR_OBJECT
static boolean_t target_supports_object(const KXLDKext *kext)
{
return (kext->cputype == CPU_TYPE_POWERPC ||
kext->cputype == CPU_TYPE_I386 ||
kext->cputype == CPU_TYPE_ARM);
}
/*
 * Initializes a kext from an MH_OBJECT (relocatable object) file.
 *
 * Walks the load commands, recording the single allowed segment's section
 * count/offset, the symbol table, and the UUID.  If a non-empty segment
 * exists, parses its sections, resolves GOT/common symbols as configured,
 * and synthesizes segments; otherwise the file is a pseudo-kext.
 *
 * Fix: the section-init call previously read "§_offset" — an HTML-entity
 * corruption of "&sect_offset" — which does not compile.  Restored the
 * address-of expression.
 */
static kern_return_t
init_from_object(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    KXLDSect *sect = NULL;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int ncmds = 0;
    u_int nsects = 0;
    u_int i = 0;
    boolean_t has_segment = FALSE;

    check(kext);

    require_action(target_supports_object(kext),
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_OBJECT));

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);
    require_action(filetype == MH_OBJECT, finish, rval=KERN_FAILURE);

    /* Pass 1: walk load commands; an MH_OBJECT may have at most one
     * (word-size-matching) non-empty segment. */
    for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                require_action(kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "LC_SEGMENT in 64-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "Multiple segments in an MH_OBJECT kext."));
                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                require_action(!kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "LC_SEGMENT_64 in a 32-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "Multiple segments in an MH_OBJECT kext."));
                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
                kext->symtab, kext->file, symtab_hdr, 0);
            require_noerr(rval, finish);
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_UNIXTHREAD:
            /* Ignored in object files. */
            break;
        default:
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_OBJECT kext: %u.", cmd_hdr->cmd);
            goto finish;
        }
    }

    if (has_segment) {
        /* Pass 2: parse the segment's sections and synthesize segments. */
        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);
        for (i = 0; i < nsects; ++i) {
            sect = kxld_array_get_item(&kext->sects, i);
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                sect, kext->file, &sect_offset, i, &kext->relocator);
            require_noerr(rval, finish);
        }
#if KXLD_USER_OR_GOT
        rval = create_got(kext);
        require_noerr(rval, finish);
#endif
#if KXLD_USER_OR_COMMON
        rval = resolve_common_symbols(kext);
        require_noerr(rval, finish);
#endif
        rval = kxld_seg_create_seg_from_sections(&kext->segs, &kext->sects);
        require_noerr(rval, finish);
        rval = kxld_seg_finalize_object_segment(&kext->segs,
            kext->section_order, get_macho_header_size(kext));
        require_noerr(rval, finish);
        kext->link_type = KXLD_LINK_KEXT;
    } else {
        /* No loadable segment: treat as a symbol-only pseudo-kext. */
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif
/*
 * Shared initializer for already-linked images (MH_EXECUTE and
 * MH_KEXT_BUNDLE).  First pass counts non-empty segments and their
 * sections; second pass parses segments, sections, the LC_SYMTAB and
 * LC_DYSYMTAB headers (including external/local relocation tables), and
 * the UUID.  Returns the filetype and symtab header through the optional
 * out parameters and marks the kext as a final image.
 *
 * Fix: the section-init call previously read "§_offset" — an HTML-entity
 * corruption of "&sect_offset" — which does not compile.  Restored the
 * address-of expression.
 */
static kern_return_t
init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    u_long base_offset = 0;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int i = 0;
    u_int j = 0;
    u_int segi = 0;
    u_int secti = 0;
    u_int nsegs = 0;
    u_int nsects = 0;
    u_int ncmds = 0;

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), base_offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    /* Pass 1: count non-empty segments and their sections so the arrays
     * can be sized up front. */
    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif
        default:
            continue;
        }
    }

    if (nsegs) {
        rval = kxld_array_init(&kext->segs, sizeof(KXLDSeg), nsegs);
        require_noerr(rval, finish);
        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);
    }

    /* Pass 2: parse every load command for real. */
    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        seg = NULL;
        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                seg = kxld_array_get_item(&kext->segs, segi++);
                rval = kxld_seg_init_from_macho_32(seg, seg_hdr);
                require_noerr(rval, finish);
                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;
                if (!seg_hdr->vmsize) continue;
                seg = kxld_array_get_item(&kext->segs, segi++);
                rval = kxld_seg_init_from_macho_64(seg, seg_hdr);
                require_noerr(rval, finish);
                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_DYSYMTAB:
            /* Pull in both relocation tables for later processing. */
            kext->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr;
            rval = kxld_reloc_create_macho(&kext->extrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->extreloff),
                kext->dysymtab_hdr->nextrel);
            require_noerr(rval, finish);
            rval = kxld_reloc_create_macho(&kext->locrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->locreloff),
                kext->dysymtab_hdr->nlocrel);
            require_noerr(rval, finish);
            break;
        case LC_UNIXTHREAD:
            /* Only the kernel image may carry a thread command. */
            require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_UNIXTHREAD segment is not valid in a kext."));
            break;
        default:
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd);
            goto finish;
        }

        if (seg) {
            /* Parse the segment's sections and attach them to it. */
            for (j = 0; j < seg->sects.nitems; ++j, ++secti) {
                sect = kxld_array_get_item(&kext->sects, secti);
                KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                    kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                    sect, kext->file, &sect_offset, secti, &kext->relocator);
                require_noerr(rval, finish);
                rval = kxld_seg_add_section(seg, sect);
                require_noerr(rval, finish);
            }
            rval = kxld_seg_finish_init(seg);
            require_noerr(rval, finish);
        }
    }

    if (filetype_out) *filetype_out = filetype;
    if (symtab_hdr_out) *symtab_hdr_out = symtab_hdr;
    kext->is_final_image = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*
 * Initializes the kernel image (MH_EXECUTE, only valid when link_type is
 * KXLD_LINK_KERNEL).  Parses the final-linked image, builds the symbol
 * table (adjusting for the in-kernel linkedit displacement), and, where
 * MH_OBJECT linking is supported, records the kernel's section ordering
 * so object-file kexts can be laid out to match.
 */
static kern_return_t
init_from_execute(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
struct symtab_command *symtab_hdr = NULL;
kxld_addr_t linkedit_offset = 0;
u_int filetype = 0;
#if KERNEL
KXLDSeg *textseg = NULL;
KXLDSeg *linkeditseg = NULL;
#endif
#if KXLD_USER_OR_OBJECT
KXLDSeg *seg = NULL;
KXLDSect *sect = NULL;
KXLDSectionName *sname = NULL;
u_int i = 0, j = 0, k = 0;
#endif
check(kext);
require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
rval=KERN_FAILURE);
rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
require_noerr(rval, finish);
require_action(filetype == MH_EXECUTE, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
"The kernel file is not of type MH_EXECUTE."));
#if KERNEL
/* Compute the displacement of the linkedit data relative to __TEXT so
 * symbol-table file offsets can be translated to in-memory addresses. */
textseg = kxld_kext_get_seg_by_name(kext, SEG_TEXT);
require_action(textseg, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));
linkeditseg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
require_action(linkeditseg, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));
linkedit_offset = linkeditseg->base_addr - textseg->base_addr -
linkeditseg->fileoff;
#endif
KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
kext->symtab, kext->file, symtab_hdr, linkedit_offset);
require_noerr(rval, finish);
#if KXLD_USER_OR_OBJECT
/* Snapshot the kernel's segment/section ordering for MH_OBJECT layout. */
if (target_supports_object(kext)) {
rval = kxld_array_init(kext->section_order, sizeof(KXLDSectionName),
kext->sects.nitems);
require_noerr(rval, finish);
for (i = 0, k = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
for (j = 0; j < seg->sects.nitems; ++j, ++k) {
sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j);
sname = kxld_array_get_item(kext->section_order, k);
strlcpy(sname->segname, sect->segname, sizeof(sname->segname));
strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname));
}
}
}
#endif
rval = KERN_SUCCESS;
finish:
return rval;
}
#if KXLD_USER_OR_BUNDLE
static boolean_t
target_supports_bundle(const KXLDKext *kext)
{
return (kext->cputype == CPU_TYPE_X86_64);
}
/*
 * Initializes a kext from an MH_KEXT_BUNDLE file.  Parses the final-linked
 * image, builds the symbol table, and drops the __LINKEDIT segment (its
 * data is not loaded).  A bundle with no segments becomes a pseudo-kext.
 */
static kern_return_t
init_from_bundle(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSeg *seg = NULL;
struct symtab_command *symtab_hdr = NULL;
u_int filetype = 0;
u_int idx = 0;
check(kext);
require_action(target_supports_bundle(kext), finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr,
kKxldLogFiletypeNotSupported, MH_KEXT_BUNDLE));
rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
require_noerr(rval, finish);
require_action(filetype == MH_KEXT_BUNDLE, finish,
rval=KERN_FAILURE);
KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
kext->symtab, kext->file, symtab_hdr, 0);
require_noerr(rval, finish);
if (kext->segs.nitems) {
/* Remove __LINKEDIT: it is not part of the loaded kext image. */
seg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
if (seg) {
rval = kxld_array_get_index(&kext->segs, seg, &idx);
require_noerr(rval, finish);
kxld_seg_deinit(seg);
rval = kxld_array_remove(&kext->segs, idx);
require_noerr(rval, finish);
}
kext->link_type = KXLD_LINK_KEXT;
} else {
kext->link_type = KXLD_LINK_PSEUDO_KEXT;
}
rval = KERN_SUCCESS;
finish:
return rval;
}
#endif
#if KXLD_USER_OR_ILP32
/* Reads the filetype and load-command count out of a 32-bit Mach-O header
 * at file + offset; returns the header size (offset of the first command). */
static u_long
get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header *mach_hdr = (struct mach_header *) (file + offset);

    if (ncmds) {
        *ncmds = mach_hdr->ncmds;
    }
    if (filetype) {
        *filetype = mach_hdr->filetype;
    }

    return sizeof(*mach_hdr);
}
#endif
#if KXLD_USER_OR_LP64
/* Reads the filetype and load-command count out of a 64-bit Mach-O header
 * at file + offset; returns the header size (offset of the first command). */
static u_long
get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) (file + offset);

    if (ncmds) {
        *ncmds = mach_hdr->ncmds;
    }
    if (filetype) {
        *filetype = mach_hdr->filetype;
    }

    return sizeof(*mach_hdr);
}
#endif
/*
 * Discovers and initializes all C++ vtables in the kext and indexes them
 * by name.  For the kernel, vtable symbols are enumerated directly.  For
 * kexts, classes are found via their super-metaclass-pointer symbols and
 * each class contributes two vtables (the class vtable and its metaclass
 * vtable) — hence the *2 sizing below.
 */
static kern_return_t
create_vtables(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSymtabIterator iter;
KXLDSym *sym = NULL;
KXLDSym *vtable_sym = NULL;
KXLDSym *meta_vtable_sym = NULL;
KXLDSect *vtable_sect = NULL;
KXLDSect *meta_vtable_sect = NULL;
KXLDVTable *vtable = NULL;
KXLDVTable *meta_vtable = NULL;
char class_name[KXLD_MAX_NAME_LEN];
char vtable_name[KXLD_MAX_NAME_LEN];
char meta_vtable_name[KXLD_MAX_NAME_LEN];
u_int i = 0;
u_int nvtables = 0;
if (kext->link_type == KXLD_LINK_KERNEL) {
kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_vtable, FALSE);
nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
} else {
kxld_symtab_iterator_init(&iter, kext->symtab,
kxld_sym_is_super_metaclass_pointer, FALSE);
/* one class vtable + one metaclass vtable per class */
nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
}
rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
require_noerr(rval, finish);
while ((sym = kxld_symtab_iterator_get_next(&iter))) {
if (kext->link_type == KXLD_LINK_KERNEL) {
vtable_sym = sym;
} else {
/* Derive the vtable and metaclass-vtable symbol names from the
 * class name encoded in the super-metaclass pointer symbol. */
rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
sym, class_name, sizeof(class_name));
require_noerr(rval, finish);
rval = kxld_sym_get_vtable_name_from_class_name(class_name,
vtable_name, sizeof(vtable_name));
require_noerr(rval, finish);
vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab, vtable_name);
require_action(vtable_sym, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
vtable_name, class_name));
rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
meta_vtable_name, sizeof(meta_vtable_name));
require_noerr(rval, finish);
meta_vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
meta_vtable_name);
if (!meta_vtable_sym) {
/* A missing metaclass vtable is fatal only on strict-patching
 * targets; otherwise warn and shrink the array by one slot. */
if (target_supports_strict_patching(kext)) {
kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
meta_vtable_name, class_name);
rval = KERN_FAILURE;
goto finish;
} else {
kxld_log(kKxldLogPatching, kKxldLogErr,
"Warning: " kKxldLogMissingVtable,
meta_vtable_name, class_name);
/* NOTE(review): resize return value is ignored here — a shrink
 * presumably cannot fail; confirm kxld_array_resize semantics. */
kxld_array_resize(&kext->vtables, --nvtables);
}
}
}
vtable_sect = kxld_array_get_item(&kext->sects, vtable_sym->sectnum);
require_action(vtable_sect, finish, rval=KERN_FAILURE);
vtable = kxld_array_get_item(&kext->vtables, i++);
if (kext->link_type == KXLD_LINK_KERNEL) {
rval = kxld_vtable_init_from_kernel_macho(vtable, vtable_sym,
vtable_sect, kext->symtab, &kext->relocator);
require_noerr(rval, finish);
} else {
if (kext->is_final_image) {
rval = kxld_vtable_init_from_final_macho(vtable, vtable_sym,
vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
require_noerr(rval, finish);
} else {
rval = kxld_vtable_init_from_object_macho(vtable, vtable_sym,
vtable_sect, kext->symtab, &kext->relocator);
require_noerr(rval, finish);
}
if (meta_vtable_sym) {
meta_vtable_sect = kxld_array_get_item(&kext->sects,
meta_vtable_sym->sectnum);
/* NOTE(review): this checks vtable_sect, not meta_vtable_sect —
 * looks like a copy/paste slip; confirm intended operand. */
require_action(vtable_sect, finish, rval=KERN_FAILURE);
meta_vtable = kxld_array_get_item(&kext->vtables, i++);
if (kext->is_final_image) {
rval = kxld_vtable_init_from_final_macho(meta_vtable, meta_vtable_sym,
meta_vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
require_noerr(rval, finish);
} else {
rval = kxld_vtable_init_from_object_macho(meta_vtable, meta_vtable_sym,
meta_vtable_sect, kext->symtab, &kext->relocator);
require_noerr(rval, finish);
}
}
}
}
/* Every allocated slot must have been filled. */
require_action(i == kext->vtables.nitems, finish,
rval=KERN_FAILURE);
/* Index all vtables by name for fast lookup during patching. */
rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash,
kxld_dict_string_cmp, kext->vtables.nitems);
require_noerr(rval, finish);
for (i = 0; i < kext->vtables.nitems; ++i) {
vtable = kxld_array_get_item(&kext->vtables, i);
rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable);
require_noerr(rval, finish);
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/*
 * Marks kxld-private exported symbols (kmod_info and the C++ allocation
 * operators) as private so kexts do not re-export them.
 *
 * Improvement: break out of the name loop on the first match — each symbol
 * name can equal at most one entry of the distinct private_symbols list,
 * so further comparisons after a match are pointless.
 */
static void
restrict_private_symbols(KXLDKext *kext)
{
    const char *private_symbols[] = {
        KXLD_KMOD_INFO_SYMBOL,
        KXLD_OPERATOR_NEW_SYMBOL,
        KXLD_OPERATOR_NEW_ARRAY_SYMBOL,
        KXLD_OPERATOR_DELETE_SYMBOL,
        KXLD_OPERATOR_DELETE_ARRAY_SYMBOL
    };
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    const char *name = NULL;
    u_int i = 0;

    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        for (i = 0; i < const_array_len(private_symbols); ++i) {
            name = private_symbols[i];
            if (streq(sym->name, name)) {
                kxld_sym_mark_private(sym);
                break; /* a symbol matches at most one private name */
            }
        }
    }
}
/*
 * Resets a kext to a reusable state: clears (but does not free) all
 * parsed data and zeroes the link-time fields.  Storage in the arrays is
 * retained for reuse; see kxld_kext_deinit for full teardown.
 */
void
kxld_kext_clear(KXLDKext *kext)
{
KXLDSeg *seg = NULL;
KXLDSect *sect = NULL;
KXLDVTable *vtable = NULL;
u_int i;
check(kext);
#if !KERNEL
/* The kernel image was swapped to host order during init; restore it. */
if (kext->link_type == KXLD_LINK_KERNEL) {
unswap_macho(kext->file, kext->host_order, kext->target_order);
}
#endif
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
kxld_seg_clear(seg);
}
kxld_array_reset(&kext->segs);
for (i = 0; i < kext->sects.nitems; ++i) {
sect = kxld_array_get_item(&kext->sects, i);
kxld_sect_clear(sect);
}
kxld_array_reset(&kext->sects);
for (i = 0; i < kext->vtables.nitems; ++i) {
vtable = kxld_array_get_item(&kext->vtables, i);
kxld_vtable_clear(vtable);
}
kxld_array_reset(&kext->vtables);
kxld_array_reset(&kext->extrelocs);
kxld_array_reset(&kext->locrelocs);
kxld_dict_clear(&kext->vtable_index);
kxld_relocator_clear(&kext->relocator);
kxld_uuid_clear(&kext->uuid);
if (kext->symtab) kxld_symtab_clear(kext->symtab);
kext->link_addr = 0;
kext->kmod_link_addr = 0;
kext->cputype = 0;
kext->cpusubtype = 0;
kext->link_type = KXLD_LINK_UNKNOWN;
kext->is_final_image = FALSE;
kext->got_is_created = FALSE;
}
/*
 * Fully tears down a kext: deinitializes every slot (maxitems, not just
 * nitems, so previously-used-but-reset slots are covered), frees the
 * owned symbol table, and zeroes the structure.
 */
void
kxld_kext_deinit(KXLDKext *kext)
{
KXLDSeg *seg = NULL;
KXLDSect *sect = NULL;
KXLDVTable *vtable = NULL;
u_int i;
check(kext);
#if !KERNEL
/* The kernel image was swapped to host order during init; restore it. */
if (kext->link_type == KXLD_LINK_KERNEL) {
unswap_macho(kext->file, kext->host_order, kext->target_order);
}
#endif
for (i = 0; i < kext->segs.maxitems; ++i) {
seg = kxld_array_get_slot(&kext->segs, i);
kxld_seg_deinit(seg);
}
kxld_array_deinit(&kext->segs);
for (i = 0; i < kext->sects.maxitems; ++i) {
sect = kxld_array_get_slot(&kext->sects, i);
kxld_sect_deinit(sect);
}
kxld_array_deinit(&kext->sects);
for (i = 0; i < kext->vtables.maxitems; ++i) {
vtable = kxld_array_get_slot(&kext->vtables, i);
kxld_vtable_deinit(vtable);
}
kxld_array_deinit(&kext->vtables);
kxld_array_deinit(&kext->extrelocs);
kxld_array_deinit(&kext->locrelocs);
kxld_dict_deinit(&kext->vtable_index);
if (kext->symtab) {
kxld_symtab_deinit(kext->symtab);
kxld_free(kext->symtab, kxld_symtab_sizeof());
}
bzero(kext, sizeof(*kext));
}
boolean_t
kxld_kext_is_true_kext(const KXLDKext *kext)
{
return (kext->link_type == KXLD_LINK_KEXT);
}
/*
 * Reports the VM footprint of the linked kext.  *header_size is the
 * page-rounded Mach-O header size (0 for final-form images, which keep
 * their original header in place); *vmsize is header plus all segment
 * VM sizes.
 *
 * Improvement: removed the redundant zero-initialization of the out
 * parameters — both were dead stores, immediately overwritten below.
 */
void
kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize)
{
    check(kext);
    check(header_size);
    check(vmsize);

    *header_size = (kext->is_final_image) ?
        0 : round_page(get_macho_header_size(kext));
    *vmsize = *header_size + get_macho_data_size(kext);
}
/* Returns the kext's symbol table (owned by the kext; do not free). */
const struct kxld_symtab *
kxld_kext_get_symtab(const KXLDKext *kext)
{
check(kext);
return kext->symtab;
}
/* Returns the number of symbols in the kext's symbol table. */
u_int
kxld_kext_get_num_symbols(const KXLDKext *kext)
{
check(kext);
return kxld_symtab_get_num_symbols(kext->symtab);
}
/* Exposes the kext's vtable array through the out parameter
 * (array remains owned by the kext). */
void
kxld_kext_get_vtables(KXLDKext *kext, const KXLDArray **vtables)
{
    check(vtables);
    check(kext);

    *vtables = &kext->vtables;
}
/* Returns the number of vtables discovered in the kext. */
u_int
kxld_kext_get_num_vtables(const KXLDKext *kext)
{
check(kext);
return kext->vtables.nitems;
}
/* Returns the first segment whose name equals segname, or NULL. */
KXLDSeg *
kxld_kext_get_seg_by_name(const KXLDKext *kext, const char *segname)
{
    u_int idx = 0;

    for (idx = 0; idx < kext->segs.nitems; ++idx) {
        KXLDSeg *candidate = kxld_array_get_item(&kext->segs, idx);
        if (streq(segname, candidate->segname)) {
            return candidate;
        }
    }
    return NULL;
}
/* Returns the first section matching both segname and sectname, or NULL. */
KXLDSect *
kxld_kext_get_sect_by_name(const KXLDKext *kext, const char *segname,
    const char *sectname)
{
    u_int idx = 0;

    for (idx = 0; idx < kext->sects.nitems; ++idx) {
        KXLDSect *candidate = kxld_array_get_item(&kext->sects, idx);
        if (streq(segname, candidate->segname) &&
            streq(sectname, candidate->sectname)) {
            return candidate;
        }
    }
    return NULL;
}
/* Returns the index of sect within the kext's section array, or -1 if
 * the section is not found.  (idx is u_int; the UINT_MAX sentinel set on
 * failure converts back to -1 through the int return type.) */
int
kxld_kext_get_sectnum_for_sect(const KXLDKext *kext, const KXLDSect *sect)
{
kern_return_t rval = KERN_FAILURE;
u_int idx = -1;
rval = kxld_array_get_index(&kext->sects, sect, &idx);
if (rval) idx = -1;
return idx;
}
/* Returns the kernel's recorded section ordering (used to lay out
 * MH_OBJECT kexts), or NULL when unavailable — only the kernel image on
 * object-capable targets carries one. */
const KXLDArray *
kxld_kext_get_section_order(const KXLDKext *kext __unused)
{
#if KXLD_USER_OR_OBJECT
if (kext->link_type == KXLD_LINK_KERNEL && target_supports_object(kext)) {
return kext->section_order;
}
#endif
return NULL;
}
static u_long
get_macho_header_size(const KXLDKext *kext)
{
KXLDSeg *seg = NULL;
u_long header_size = 0;
u_int i = 0;
check(kext);
if (kxld_kext_is_32_bit(kext)) {
header_size += sizeof(struct mach_header);
} else {
header_size += sizeof(struct mach_header_64);
}
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
}
if (kext->uuid.has_uuid) {
header_size += kxld_uuid_get_macho_header_size();
}
return header_size;
}
/* Computes the total VM size of all segments in the kext — the size of the
 * data region that follows the Mach-O header when exported to VM layout. */
static u_long
get_macho_data_size(const KXLDKext *kext)
{
    u_long total = 0;
    u_int i = 0;

    check(kext);

    for (i = 0; i < kext->segs.nitems; ++i) {
        KXLDSeg *seg = kxld_array_get_item(&kext->segs, i);

        total += (u_long) kxld_seg_get_vmsize(seg);
    }

    return total;
}
/* Writes the fully linked kext into the caller-supplied buffer as a Mach-O
 * image laid out at its final VM addresses, and reports the link-time
 * address of the kext's kmod_info through *kmod_info_kern.
 * The buffer must be large enough for the header plus all segment data
 * (see get_macho_header_size / get_macho_data_size). */
kern_return_t kxld_kext_export_linked_object(const KXLDKext *kext,
u_char *linked_object, kxld_addr_t *kmod_info_kern)
{
kern_return_t rval = KERN_FAILURE;
KXLDSeg *seg = NULL;
u_long size = 0;
u_long header_size = 0;
u_long header_offset = 0;
u_long data_offset = 0;
u_int ncmds = 0;
u_int i = 0;
check(kext);
check(linked_object);
check(kmod_info_kern);
*kmod_info_kern = 0;
/* Final images place data right after the header; otherwise the data
 * region starts on the next page boundary. */
header_size = get_macho_header_size(kext);
data_offset = (kext->is_final_image) ? header_size : round_page(header_size);
size = data_offset + get_macho_data_size(kext);
/* One load command per segment, plus one for the UUID if present. */
ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE);
rval = export_macho_header(kext, linked_object, ncmds,
&header_offset, header_size);
require_noerr(rval, finish);
/* Emit each segment's load command and copy its data to its VM slot. */
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
rval = kxld_seg_export_macho_to_vm(seg, linked_object, &header_offset,
header_size, size, kext->link_addr, kxld_kext_is_32_bit(kext));
require_noerr(rval, finish);
}
if (kext->uuid.has_uuid) {
rval = kxld_uuid_export_macho(&kext->uuid, linked_object,
&header_offset, header_size);
require_noerr(rval, finish);
}
*kmod_info_kern = kext->kmod_link_addr;
#if !KERNEL
/* When cross-linking, convert the exported image back to the target's
 * byte order before handing it to the caller. */
unswap_macho(linked_object, kext->host_order, kext->target_order);
#endif
rval = KERN_SUCCESS;
finish:
return rval;
}
#if !KERNEL
/* Builds a standalone Mach-O symbol file for the kext (user-space linker
 * only): header, segment load commands, symbol table, and UUID, followed by
 * page-aligned data. On success, ownership of the allocated buffer passes
 * to the caller via *_symbol_file with its size in *_filesize. */
kern_return_t
kxld_kext_export_symbol_file(const KXLDKext *kext,
u_char **_symbol_file, u_long *_filesize)
{
kern_return_t rval = KERN_FAILURE;
KXLDSeg *seg = NULL;
u_char *file = NULL;
u_long size = 0;
u_long header_size = 0;
u_long header_offset = 0;
u_long data_offset = 0;
u_int ncmds = 0;
u_int i = 0;
check(kext);
check(_symbol_file);
*_symbol_file = NULL;
/* Tally the header and data sizes: mach_header, per-segment load
 * commands and data, symbol table, and optional UUID. */
if (kxld_kext_is_32_bit(kext)) {
header_size += sizeof(struct mach_header);
} else {
header_size += sizeof(struct mach_header_64);
}
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
size += kxld_seg_get_macho_data_size(seg);
}
header_size += kxld_symtab_get_macho_header_size();
size += kxld_symtab_get_macho_data_size(kext->symtab, FALSE,
kxld_kext_is_32_bit(kext));
if (kext->uuid.has_uuid) {
header_size += kxld_uuid_get_macho_header_size();
}
/* Data begins on the first page boundary after the header. */
data_offset = round_page(header_size);
size += data_offset;
file = kxld_page_alloc_untracked(size);
require_action(file, finish, rval=KERN_RESOURCE_SHORTAGE);
bzero(file, size);
/* Segments + optional UUID + one LC_SYMTAB command. */
ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE) + 1;
rval = export_macho_header(kext, file, ncmds, &header_offset, header_size);
require_noerr(rval, finish);
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
rval = kxld_seg_export_macho_to_file_buffer(seg, file, &header_offset,
header_size, &data_offset, size, kxld_kext_is_32_bit(kext));
require_noerr(rval, finish);
}
rval = kxld_symtab_export_macho(kext->symtab, file, &header_offset,
header_size, &data_offset, size, FALSE, kxld_kext_is_32_bit(kext));
require_noerr(rval, finish);
if (kext->uuid.has_uuid) {
rval = kxld_uuid_export_macho(&kext->uuid, file, &header_offset,
header_size);
require_noerr(rval, finish);
}
header_offset = header_size;
/* Return the file in target byte order. */
unswap_macho(file, kext->host_order, kext->target_order);
*_filesize = size;
*_symbol_file = file;
/* Ownership transferred to the caller; NULL so cleanup skips the free. */
file = NULL;
rval = KERN_SUCCESS;
finish:
if (file) {
kxld_page_free_untracked(file, size);
file = NULL;
}
check(!file);
check((!rval) ^ (!*_symbol_file));
return rval;
}
#endif
/* Returns TRUE when the link target's byte order differs from the host's.
 * In-kernel linking always targets the running kernel, so this is FALSE
 * there; kext is only read in the user-space build. */
boolean_t
kxld_kext_target_needs_swap(const KXLDKext *kext __unused)
{
#if KERNEL
return FALSE;
#else
return (kext->target_order != kext->host_order);
#endif
}
/* Dispatches to the 32- or 64-bit mach_header exporter based on the kext's
 * word size; KXLD_3264_FUNC selects which implementation to call and stores
 * its result in rval. */
static kern_return_t
export_macho_header(const KXLDKext *kext, u_char *buf, u_int ncmds,
u_long *header_offset, u_long header_size)
{
kern_return_t rval = KERN_FAILURE;
check(kext);
check(buf);
check(header_offset);
KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
export_macho_header_32, export_macho_header_64,
kext, buf, ncmds, header_offset, header_size);
require_noerr(rval, finish);
rval = KERN_SUCCESS;
finish:
return rval;
}
#if KXLD_USER_OR_ILP32
/* Writes a 32-bit mach_header at *header_offset in buf and advances the
 * offset past it. Fails if the header would not fit within header_size. */
static kern_return_t
export_macho_header_32(const KXLDKext *kext, u_char *buf, u_int ncmds,
    u_long *header_offset, u_long header_size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach = NULL;

    check(kext);
    check(buf);
    check(header_offset);

    require_action(sizeof(*mach) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    mach = (struct mach_header *) (buf + *header_offset);

    mach->magic = MH_MAGIC;
    mach->cputype = kext->cputype;
    /* Bug fix: cpusubtype was never written, leaving it zero in exported
     * 32-bit images; the 64-bit exporter below has always set it. */
    mach->cpusubtype = kext->cpusubtype;
    mach->filetype = kext->filetype;
    mach->ncmds = ncmds;
    mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
    mach->flags = MH_NOUNDEFS;

    *header_offset += sizeof(*mach);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/* Writes a 64-bit mach_header_64 at *header_offset in buf and advances the
 * offset past it. Fails if the header would not fit within header_size. */
static kern_return_t
export_macho_header_64(const KXLDKext *kext, u_char *buf, u_int ncmds,
u_long *header_offset, u_long header_size)
{
kern_return_t rval = KERN_FAILURE;
struct mach_header_64 *mach = NULL;
check(kext);
check(buf);
check(header_offset);
/* Bounds check: the header must fit in the remaining header region. */
require_action(sizeof(*mach) <= header_size - *header_offset, finish,
rval=KERN_FAILURE);
mach = (struct mach_header_64 *) (buf + *header_offset);
mach->magic = MH_MAGIC_64;
mach->cputype = kext->cputype;
mach->cpusubtype = kext->cpusubtype;
mach->filetype = kext->filetype;
mach->ncmds = ncmds;
/* Everything after the mach_header itself is load commands. */
mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
mach->flags = MH_NOUNDEFS;
*header_offset += sizeof(*mach);
rval = KERN_SUCCESS;
finish:
return rval;
}
#endif
/* Resolves a pseudo-kext (symbol set) against the defined-symbols
 * dictionary, validates that nothing remains unresolved, and copies the
 * already-patched vtables it references. Only valid for
 * KXLD_LINK_PSEUDO_KEXT; any other link type fails. */
kern_return_t
kxld_kext_resolve(KXLDKext *kext, struct kxld_dict *patched_vtables,
struct kxld_dict *defined_symbols)
{
kern_return_t rval = KERN_FAILURE;
require_action(kext->link_type == KXLD_LINK_PSEUDO_KEXT, finish,
rval=KERN_FAILURE);
/* Pseudo-kexts have no obsolete-symbol dictionary to check against. */
rval = resolve_symbols(kext, defined_symbols, NULL);
require_noerr(rval, finish);
rval = validate_symbols(kext);
require_noerr(rval, finish);
rval = copy_vtables(kext, patched_vtables);
require_noerr(rval, finish);
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Links a real kext at link_address: relocates segments and symbols,
 * populates kmod_info, resolves symbols against the supplied dictionaries,
 * patches vtables, and finally processes relocation entries. The order of
 * these steps matters — symbol addresses must be final before relocations
 * are applied. Only valid for KXLD_LINK_KEXT. */
kern_return_t
kxld_kext_relocate(KXLDKext *kext, kxld_addr_t link_address,
KXLDDict *patched_vtables, KXLDDict *defined_symbols,
KXLDDict *obsolete_symbols)
{
kern_return_t rval = KERN_FAILURE;
KXLDSeg *seg = NULL;
u_int i = 0;
check(kext);
check(patched_vtables);
check(defined_symbols);
require_action(kext->link_type == KXLD_LINK_KEXT, finish, rval=KERN_FAILURE);
kext->link_addr = link_address;
/* Slide every segment to its final address. */
for (i = 0; i < kext->segs.nitems; ++i) {
seg = kxld_array_get_item(&kext->segs, i);
kxld_seg_relocate(seg, link_address);
}
/* Give each symbol its final link address based on its section. */
rval = kxld_symtab_relocate(kext->symtab, &kext->sects);
require_noerr(rval, finish);
rval = populate_kmod_info(kext);
require_noerr(rval, finish);
rval = resolve_symbols(kext, defined_symbols, obsolete_symbols);
require_noerr(rval, finish);
rval = patch_vtables(kext, patched_vtables, defined_symbols);
require_noerr(rval, finish);
rval = validate_symbols(kext);
require_noerr(rval, finish);
/* Final images carry relocations in the dysymtab tables; object files
 * carry them per section. A build that cannot handle the kext's form
 * fails outright. */
if (kext->is_final_image) {
#if KXLD_USER_OR_BUNDLE
rval = process_symbol_pointers(kext);
require_noerr(rval, finish);
rval = process_relocs_from_tables(kext);
require_noerr(rval, finish);
#else
require_action(FALSE, finish, rval=KERN_FAILURE);
#endif
} else {
#if KXLD_USER_OR_GOT
rval = populate_got(kext);
require_noerr(rval, finish);
#endif
#if KXLD_USER_OR_OBJECT
rval = process_relocs_from_sections(kext);
require_noerr(rval, finish);
#else
require_action(FALSE, finish, rval=KERN_FAILURE);
#endif
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Resolves this kext's symbols against defined_symbols:
 *   1) reports exported symbols that collide with different addresses in
 *      the dictionary (multiple definition errors);
 *   2) resolves each unresolved symbol — rejecting common symbols on
 *      targets that don't support them, deleting unresolvable symbols from
 *      pseudo-kexts with a warning, and allowing weak references only when
 *      the kext tests for them.
 * obsolete_symbols may be NULL; when given, matches only produce a warning. */
static kern_return_t
resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
KXLDDict *obsolete_symbols)
{
kern_return_t rval = KERN_FAILURE;
KXLDSymtabIterator iter;
KXLDSym *sym = NULL;
void *addrp = NULL;
kxld_addr_t addr = 0;
const char *name = NULL;
boolean_t tests_for_weak = FALSE;
boolean_t error = FALSE;
boolean_t warning = FALSE;
check(kext);
check(defined_symbols);
/* A kext that references KXLD_WEAK_TEST_SYMBOL is declaring that it
 * checks weak symbols at runtime before using them. */
sym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_WEAK_TEST_SYMBOL);
tests_for_weak = (sym != NULL);
/* Pass 1: flag exported symbols already defined elsewhere at a
 * different address. Identical addresses are tolerated. */
kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
while ((sym = kxld_symtab_iterator_get_next(&iter))) {
addrp = kxld_dict_find(defined_symbols, sym->name);
if (addrp) {
/* Dictionary values are raw address words sized per target. */
if (kxld_kext_is_32_bit(kext)) {
addr = (kxld_addr_t) (*(uint32_t*)addrp);
} else {
addr = (kxld_addr_t) (*(uint64_t*)addrp);
}
if (addr == sym->link_addr) {
continue;
}
if (!error) {
error = TRUE;
kxld_log(kKxldLogLinking, kKxldLogErr,
"The following symbols were defined more than once:");
}
kxld_log(kKxldLogLinking, kKxldLogErr,
"\t%s: %p - %p", sym->name,
(void *) (uintptr_t) sym->link_addr,
(void *) (uintptr_t) addr);
}
}
require_noerr_action(error, finish, rval=KERN_FAILURE);
/* Pass 2: resolve every remaining unresolved symbol. */
kxld_symtab_iterator_init(&iter, kext->symtab,
kxld_sym_is_unresolved, FALSE);
while ((sym = kxld_symtab_iterator_get_next(&iter))) {
if (kxld_sym_is_common(sym)) {
/* Common (tentative) symbols reaching this point are fatal. */
if (!error) {
error = TRUE;
if (target_supports_common(kext)) {
kxld_log(kKxldLogLinking, kKxldLogErr,
"The following common symbols were not resolved:");
} else {
kxld_log(kKxldLogLinking, kKxldLogErr,
"Common symbols are not supported in kernel extensions. "
"Use -fno-common to build your kext. "
"The following are common symbols:");
}
}
kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name);
} else {
/* Indirect symbols are looked up under their alias name. */
if (kxld_sym_is_undefined(sym)) {
name = sym->name;
} else {
name = sym->alias;
}
addrp = kxld_dict_find(defined_symbols, name);
if (addrp) {
if (kxld_kext_is_32_bit(kext)) {
addr = (kxld_addr_t) (*(uint32_t*)addrp);
} else {
addr = (kxld_addr_t) (*(uint64_t*)addrp);
}
boolean_t is_exported = (kext->link_type == KXLD_LINK_PSEUDO_KEXT);
rval = kxld_sym_resolve(sym, addr, is_exported);
require_noerr(rval, finish);
if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) {
kxld_log(kKxldLogLinking, kKxldLogWarn,
"This kext uses obsolete symbol %s.", name);
}
} else if (kext->link_type == KXLD_LINK_PSEUDO_KEXT) {
/* Pseudo-kexts simply drop symbols they can't resolve. */
if (!warning) {
kxld_log(kKxldLogLinking, kKxldLogWarn,
"This symbol set has the following unresolved symbols:");
warning = TRUE;
}
/* NOTE(review): this logs at Err level under a Warn-level
 * heading — possibly intentional, but worth confirming. */
kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name);
kxld_sym_delete(sym);
} else if (kxld_sym_is_weak(sym)) {
/* Weak references are only legal when the kext tests for
 * resolution; they resolve to a sentinel address. */
require_action(tests_for_weak, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr,
"This kext has weak references but does not test for "
"them. Test for weak references with "
"OSKextIsSymbolResolved()."));
#if KERNEL
addr = (kxld_addr_t) &kext_weak_symbol_referenced;
#else
addr = kext->link_addr;
#endif
rval = kxld_sym_resolve(sym, addr, FALSE);
require_noerr(rval, finish);
}
}
}
require_noerr_action(error, finish, rval=KERN_FAILURE);
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Returns TRUE when the target architecture supports strict vtable
 * patching; the i386 and PowerPC targets do not. */
static boolean_t
target_supports_strict_patching(KXLDKext *kext)
{
    boolean_t supported = TRUE;

    check(kext);

    switch (kext->cputype) {
    case CPU_TYPE_I386:
    case CPU_TYPE_POWERPC:
        supported = FALSE;
        break;
    default:
        break;
    }

    return supported;
}
#define kOSMetaClassVTableName "__ZTV11OSMetaClass"
/* Patches every C++ vtable in the kext against its superclass's
 * already-patched vtable. Because a class's superclass may live in this
 * same kext, patching iterates to a fixpoint: each outer pass patches
 * every vtable whose super vtable is available, and the loop ends when all
 * are patched. If a full pass makes no progress, the remaining vtables are
 * unpatchable; the next pass runs in failure mode to log each one, then
 * the function fails. */
static kern_return_t
patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
KXLDDict *defined_symbols)
{
kern_return_t rval = KERN_FAILURE;
KXLDSymtabIterator iter;
KXLDSym *metaclass = NULL;
KXLDSym *super_metaclass_pointer = NULL;
KXLDSym *final_sym = NULL;
KXLDVTable *vtable = NULL;
KXLDVTable *super_vtable = NULL;
char class_name[KXLD_MAX_NAME_LEN];
char super_class_name[KXLD_MAX_NAME_LEN];
char vtable_name[KXLD_MAX_NAME_LEN];
char super_vtable_name[KXLD_MAX_NAME_LEN];
char final_sym_name[KXLD_MAX_NAME_LEN];
size_t len = 0;
u_int nvtables = 0;
u_int npatched = 0;
u_int nprogress = 0;
boolean_t failure = FALSE;
check(kext);
check(patched_vtables);
/* Each class is identified by its super-metaclass pointer symbol. */
kxld_symtab_iterator_init(&iter, kext->symtab,
kxld_sym_is_super_metaclass_pointer, FALSE);
nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
while (npatched < nvtables) {
npatched = 0;
nprogress = 0;
kxld_symtab_iterator_reset(&iter);
while((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter)))
{
/* Derive the class name and its vtable name from the symbol. */
rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
super_metaclass_pointer, class_name, sizeof(class_name));
require_noerr(rval, finish);
rval = kxld_sym_get_vtable_name_from_class_name(class_name,
vtable_name, sizeof(vtable_name));
require_noerr(rval, finish);
vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
require_action(vtable, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
vtable_name, class_name));
if (!vtable->is_patched) {
/* Find the superclass via the relocation on the
 * super-metaclass pointer. */
rval = get_metaclass_symbol_from_super_meta_class_pointer_symbol(
kext, super_metaclass_pointer, &metaclass);
require_noerr(rval, finish);
rval = kxld_sym_get_class_name_from_metaclass(metaclass,
super_class_name, sizeof(super_class_name));
require_noerr(rval, finish);
rval = kxld_sym_get_vtable_name_from_class_name(super_class_name,
super_vtable_name, sizeof(super_vtable_name));
require_noerr(rval, finish);
/* In failure mode, only log the unpatchable vtable. */
if (failure) {
kxld_log(kKxldLogPatching, kKxldLogErr,
"\t%s (super vtable %s)", vtable_name, super_vtable_name);
continue;
}
/* Superclass not patched yet: retry on a later pass. */
super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
if (!super_vtable) continue;
/* A subclass of a class marked final is an error. */
rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name,
final_sym_name, sizeof(final_sym_name));
require_noerr(rval, finish);
final_sym = kxld_dict_find(defined_symbols, final_sym_name);
if (!final_sym) {
final_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
final_sym_name);
}
require_action(!final_sym, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr,
"Class %s is a subclass of final class %s.",
class_name, super_class_name));
/* Patch the class vtable against its superclass's. */
rval = kxld_vtable_patch(vtable, super_vtable, kext->symtab,
target_supports_strict_patching(kext));
require_noerr(rval, finish);
rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
require_noerr(rval, finish);
/* Now patch the class's metaclass vtable, whose superclass
 * is always OSMetaClass. A missing meta vtable is legal. */
rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
vtable_name, sizeof(vtable_name));
require_noerr(rval, finish);
vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
if (!vtable) {
++nprogress;
++npatched;
continue;
}
require_action(!vtable->is_patched, finish, rval=KERN_FAILURE);
len = strlcpy(super_vtable_name, kOSMetaClassVTableName,
sizeof(super_vtable_name));
require_action(len == const_strlen(kOSMetaClassVTableName),
finish, rval=KERN_FAILURE);
super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
require_action(super_vtable && super_vtable->is_patched,
finish, rval=KERN_FAILURE);
rval = kxld_vtable_patch(vtable, super_vtable,
kext->symtab, target_supports_strict_patching(kext));
require_noerr(rval, finish);
rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
require_noerr(rval, finish);
++nprogress;
}
++npatched;
}
/* A no-progress failure pass has now logged all stuck vtables. */
require_action(!failure, finish, rval=KERN_FAILURE);
if (!nprogress) {
failure = TRUE;
kxld_log(kKxldLogPatching, kKxldLogErr,
"The following vtables were unpatchable because each one's "
"parent vtable either was not found or also was not patchable:");
}
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Fails with KERN_FAILURE if any symbol remains unresolved after linking,
 * logging each offending symbol under a one-time header line. */
static kern_return_t
validate_symbols(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    u_int found_unresolved = FALSE;

    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_unresolved, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        if (!found_unresolved) {
            found_unresolved = TRUE;
            kxld_log(kKxldLogLinking, kKxldLogErr,
                "The following symbols are unresolved for this kext:");
        }
        kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name);
    }
    require_noerr_action(found_unresolved, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
/* Grows the kext's section array by one and returns the new (uninitialized)
 * slot through *sect. */
static kern_return_t
add_section(KXLDKext *kext, KXLDSect **sect)
{
    kern_return_t rval = KERN_FAILURE;
    u_int old_count = kext->sects.nitems;

    rval = kxld_array_resize(&kext->sects, old_count + 1);
    require_noerr(rval, finish);

    *sect = kxld_array_get_item(&kext->sects, old_count);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */
#if KXLD_USER_OR_GOT
/* Returns TRUE when the target architecture uses a global offset table;
 * currently no target does, so this is unconditionally FALSE.
 * Bug fix: the return statement was terminated with a colon
 * ("return FALSE:"), which is a syntax error. */
static boolean_t
target_has_got(const KXLDKext *kext)
{
    (void) kext;   /* unused until a GOT-using target exists */
    return FALSE;
}
/* Creates an empty GOT section large enough for every GOT entry referenced
 * by the kext's sections, then marks the GOT as created. No-op on targets
 * without a GOT.
 * Bug fix: "&sect" in the add_section call had been mangled to the
 * single character '§' (a bad encoding pass), which does not compile. */
static kern_return_t
create_got(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_int ngots = 0;
    u_int i = 0;

    if (!target_has_got(kext)) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Count the GOT entries needed across all sections. */
    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        ngots += kxld_sect_get_ngots(sect, &kext->relocator,
            kext->symtab);
    }

    rval = add_section(kext, &sect);
    require_noerr(rval, finish);

    rval = kxld_sect_init_got(sect, ngots);
    require_noerr(rval, finish);

    kext->got_is_created = TRUE;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Fills in the GOT section created by create_got with the final symbol
 * addresses. No-op when the target has no GOT or none was created; fails
 * with KXLD_MISSING_GOT when a GOT was created but cannot be found. */
static kern_return_t
populate_got(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSect *sect = NULL;
u_int i = 0;
if (!target_has_got(kext) || !kext->got_is_created) {
rval = KERN_SUCCESS;
goto finish;
}
/* Locate the GOT section by its segment and section names. */
for (i = 0; i < kext->sects.nitems; ++i) {
sect = kxld_array_get_item(&kext->sects, i);
if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) &&
streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT)))
{
kxld_sect_populate_got(sect, kext->symtab,
kxld_kext_target_needs_swap(kext));
break;
}
}
/* Loop running to completion means the GOT section was not found. */
require_action(i < kext->sects.nitems, finish, rval=KXLD_MISSING_GOT);
rval = KERN_SUCCESS;
finish:
return rval;
}
#endif
static boolean_t
target_supports_common(const KXLDKext *kext)
{
check(kext);
return (kext->cputype == CPU_TYPE_I386 ||
kext->cputype == CPU_TYPE_POWERPC);
}
#if KXLD_USER_OR_COMMON
/* Lays out all common (tentative) symbols into the __DATA,__common
 * zerofill section — growing it if it exists, creating it otherwise — and
 * resolves each common symbol to its slot. No-op on targets that do not
 * support common symbols.
 * Bug fix: "&sect" and "&sectnum" had been mangled to '§' and '§num'
 * by a bad encoding pass, which does not compile. */
static kern_return_t
resolve_common_symbols(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSect *sect = NULL;
    kxld_addr_t base_addr = 0;
    kxld_size_t size = 0;
    kxld_size_t total_size = 0;
    u_int align = 0;
    u_int max_align = 0;
    u_int sectnum = 0;

    if (!target_supports_common(kext)) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Pass 1: total size and strictest alignment over all commons. */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_common, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        align = kxld_sym_get_common_align(sym);
        size = kxld_sym_get_common_size(sym);

        if (align > max_align) max_align = align;

        total_size = kxld_align_address(total_size, align) + size;
    }

    if (total_size) {
        /* Grow __DATA,__common if present; otherwise create it. */
        sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_COMMON);
        if (sect) {
            base_addr = sect->base_addr + sect->size;

            kxld_sect_grow(sect, total_size, max_align);
        } else {
            base_addr = 0;

            rval = add_section(kext, &sect);
            require_noerr(rval, finish);

            kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON,
                total_size, max_align);
        }

        rval = kxld_array_get_index(&kext->sects, sect, &sectnum);
        require_noerr(rval, finish);

        /* Pass 2: assign each common symbol its aligned slot. */
        kxld_symtab_iterator_reset(&iter);
        while ((sym = kxld_symtab_iterator_get_next(&iter))) {
            align = kxld_sym_get_common_align(sym);
            size = kxld_sym_get_common_size(sym);

            base_addr = kxld_align_address(base_addr, align);
            kxld_sym_resolve_common(sym, sectnum, base_addr);

            base_addr += size;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_COMMON */
/* Finds the metaclass symbol that a class's super-metaclass pointer refers
 * to, by locating the relocation entry at the pointer's location and
 * resolving it back to a symbol. Final images keep relocations in the
 * external/local tables keyed by address; object files keep per-section
 * relocations keyed by section offset.
 * Bug fix: "&sect->relocs" had been mangled to "§->relocs" by a bad
 * encoding pass, which does not compile. */
static kern_return_t
get_metaclass_symbol_from_super_meta_class_pointer_symbol(KXLDKext *kext,
    KXLDSym *super_metaclass_pointer_sym, KXLDSym **metaclass)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDReloc *reloc = NULL;
    uint32_t offset = 0;

    check(kext);
    check(super_metaclass_pointer_sym);
    check(metaclass);
    *metaclass = NULL;

    sect = kxld_array_get_item(&kext->sects, super_metaclass_pointer_sym->sectnum);
    require_action(sect, finish, rval=KERN_FAILURE);

    if (kext->is_final_image) {
        /* Search the external table first, then the local table. */
        reloc = kxld_reloc_get_reloc_by_offset(&kext->extrelocs,
            super_metaclass_pointer_sym->base_addr);
        if (!reloc) {
            reloc = kxld_reloc_get_reloc_by_offset(&kext->locrelocs,
                super_metaclass_pointer_sym->base_addr);
        }
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, kext->file,
            kext->symtab);
    } else {
        offset = kxld_sym_get_section_offset(super_metaclass_pointer_sym, sect);

        reloc = kxld_reloc_get_reloc_by_offset(&sect->relocs, offset);
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, sect->data,
            kext->symtab);
    }
    require_action(*metaclass, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Copies into the kext every patched vtable for which it exports a class
 * vtable symbol — the class vtable plus, when present, its metaclass
 * vtable. The array is pre-sized for two per class and shrunk by one when
 * a metaclass vtable is absent. */
static kern_return_t
copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables)
{
kern_return_t rval = KERN_FAILURE;
KXLDSymtabIterator iter;
KXLDSym *sym = NULL;
KXLDVTable *vtable = NULL, *src = NULL;
u_int i = 0;
u_int nvtables = 0;
char class_name[KXLD_MAX_NAME_LEN];
char meta_vtable_name[KXLD_MAX_NAME_LEN];
kxld_symtab_iterator_init(&iter, kext->symtab,
kxld_sym_is_class_vtable, FALSE);
/* Assume a class vtable and a meta vtable per class; shrink later. */
nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
require_noerr(rval, finish);
while ((sym = kxld_symtab_iterator_get_next(&iter))) {
src = kxld_dict_find(patched_vtables, sym->name);
require_action(src, finish, rval=KERN_FAILURE);
vtable = kxld_array_get_item(&kext->vtables, i++);
rval = kxld_vtable_copy(vtable, src);
require_noerr(rval, finish);
/* Derive the metaclass vtable name from the class name. */
rval = kxld_sym_get_class_name_from_vtable(sym,
class_name, sizeof(class_name));
require_noerr(rval, finish);
rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
meta_vtable_name, sizeof(meta_vtable_name));
require_noerr(rval, finish);
src = kxld_dict_find(patched_vtables, meta_vtable_name);
if (src) {
vtable = kxld_array_get_item(&kext->vtables, i++);
rval = kxld_vtable_copy(vtable, src);
require_noerr(rval, finish);
} else {
/* No meta vtable: drop the unused slot.
 * NOTE(review): the resize's return value is ignored here —
 * a shrink presumably cannot fail, but confirm against
 * kxld_array_resize. */
kxld_array_resize(&kext->vtables, kext->vtables.nitems - 1);
}
}
rval = KERN_SUCCESS;
finish:
return rval;
}
#if KXLD_USER_OR_OBJECT
/* Applies each section's own relocation entries (object-file layout),
 * logging which section failed if any relocation cannot be processed. */
static kern_return_t
process_relocs_from_sections(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSect *sect = NULL;
u_int i = 0;
for (i = 0; i < kext->sects.nitems; ++i) {
sect = kxld_array_get_item(&kext->sects, i);
rval = kxld_sect_process_relocs(sect, &kext->relocator,
&kext->sects, kext->symtab);
require_noerr_action(rval, finish,
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidSectReloc,
i, sect->segname, sect->sectname));
}
rval = KERN_SUCCESS;
finish:
return rval;
}
#endif
#if KXLD_USER_OR_BUNDLE
/* Applies the external and local relocation tables of a final image,
 * using the first segment as the base for address calculations. */
static kern_return_t
process_relocs_from_tables(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDReloc *reloc = NULL;
KXLDSeg *seg = NULL;
u_int i = 0;
/* Table relocations are relative to the first segment. */
seg = kxld_array_get_item(&kext->segs, 0);
/* External relocations first... */
for (i = 0; i < kext->extrelocs.nitems; ++i) {
reloc = kxld_array_get_item(&kext->extrelocs, i);
rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
kext->file, &kext->sects, kext->symtab);
require_noerr_action(rval, finish,
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidExtReloc, i));
}
/* ...then local relocations. */
for (i = 0; i < kext->locrelocs.nitems; ++i) {
reloc = kxld_array_get_item(&kext->locrelocs, i);
rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
kext->file, &kext->sects, kext->symtab);
require_noerr_action(rval, finish,
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidIntReloc, i));
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Adds val to the pointer-sized value stored at symptr, using a 32- or
 * 64-bit word according to is_32_bit. */
static void
add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit)
{
    if (is_32_bit) {
        uint32_t *target = (uint32_t *) symptr;

        *target = *target + (uint32_t) val;
    } else {
        uint64_t *target = (uint64_t *) symptr;

        *target = *target + (uint64_t) val;
    }
}
#define SECT_SYM_PTRS "__nl_symbol_ptr"
/* Slides the non-lazy symbol pointers in __DATA,__nl_symbol_ptr of a final
 * image: each pointer's indirect-symbol entry says whether to add the
 * kext's slide (local) or the resolved symbol's address. A missing section
 * is a no-op; absolute entries are left untouched. */
static kern_return_t
process_symbol_pointers(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSect *sect = NULL;
KXLDSym *sym = NULL;
int32_t *symidx = NULL;
u_char *symptr = NULL;
u_long symptrsize = 0;
u_int nsyms = 0;
u_int firstsym = 0;
u_int i = 0;
check(kext);
/* Only final images carry a dysymtab with an indirect symbol table. */
require_action(kext->is_final_image && kext->dysymtab_hdr,
finish, rval=KERN_FAILURE);
sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_SYM_PTRS);
if (!sect) {
rval = KERN_SUCCESS;
goto finish;
}
require_action(sect->flags & S_NON_LAZY_SYMBOL_POINTERS,
finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
"Section %s,%s does not have S_NON_LAZY_SYMBOL_POINTERS flag.",
SEG_DATA, SECT_SYM_PTRS));
/* Pointer stride follows the target word size. */
if (kxld_kext_is_32_bit(kext)) {
symptrsize = sizeof(uint32_t);
} else {
symptrsize = sizeof(uint64_t);
}
nsyms = (u_int) (sect->size / symptrsize);
/* reserved1 holds this section's first index into the indirect table. */
firstsym = sect->reserved1;
require_action(firstsym + nsyms <= kext->dysymtab_hdr->nindirectsyms,
finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));
symidx = (int32_t *) (kext->file + kext->dysymtab_hdr->indirectsymoff);
symidx += firstsym;
symptr = sect->data;
/* Walk the pointers and their indirect-table entries in lockstep. */
for (i = 0; i < nsyms; ++i, ++symidx, symptr+=symptrsize) {
if (*symidx & INDIRECT_SYMBOL_LOCAL) {
/* Absolute entries need no slide. */
if (*symidx & INDIRECT_SYMBOL_ABS) continue;
add_to_ptr(symptr, kext->link_addr, kxld_kext_is_32_bit(kext));
} else {
sym = kxld_symtab_get_symbol_by_index(kext->symtab, *symidx);
require_action(sym, finish, rval=KERN_FAILURE);
add_to_ptr(symptr, sym->link_addr, kxld_kext_is_32_bit(kext));
}
}
rval = KERN_SUCCESS;
finish:
return rval;
}
#endif
/* Locates the kext's kmod_info structure via the _kmod_info symbol and
 * fills in its address, size, and header size fields (byte-swapped when
 * cross-linking). No-op for anything other than a real kext. */
static kern_return_t
populate_kmod_info(KXLDKext *kext)
{
kern_return_t rval = KERN_FAILURE;
KXLDSect *kmodsect = NULL;
KXLDSym *kmodsym = NULL;
u_long kmod_offset = 0;
u_long header_size;
u_long size;
if (kext->link_type != KXLD_LINK_KEXT) {
rval = KERN_SUCCESS;
goto finish;
}
kxld_kext_get_vmsize(kext, &header_size, &size);
kmodsym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_KMOD_INFO_SYMBOL);
require_action(kmodsym, finish, rval=KERN_FAILURE;
kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo));
/* kmod_info lives inside the section that defines the symbol; compute
 * its offset within that section's data. */
kmodsect = kxld_array_get_item(&kext->sects, kmodsym->sectnum);
kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr);
kext->kmod_info = (kmod_info_t *) (kmodsect->data + kmod_offset);
kext->kmod_link_addr = kmodsym->link_addr;
if (kxld_kext_is_32_bit(kext)) {
kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kext->kmod_info);
kmod->address = (uint32_t) kext->link_addr;
kmod->size = (uint32_t) size;
kmod->hdr_size = (uint32_t) header_size;
#if !KERNEL
/* Cross-linking: store the fields in target byte order. */
if (kxld_kext_target_needs_swap(kext)) {
kmod->address = OSSwapInt32(kmod->address);
kmod->size = OSSwapInt32(kmod->size);
kmod->hdr_size = OSSwapInt32(kmod->hdr_size);
}
#endif
} else {
kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kext->kmod_info);
kmod->address = kext->link_addr;
kmod->size = size;
kmod->hdr_size = header_size;
#if !KERNEL
/* Cross-linking: store the fields in target byte order. */
if (kxld_kext_target_needs_swap(kext)) {
kmod->address = OSSwapInt64(kmod->address);
kmod->size = OSSwapInt64(kmod->size);
kmod->hdr_size = OSSwapInt64(kmod->hdr_size);
}
#endif
}
rval = KERN_SUCCESS;
finish:
return rval;
}