#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>
#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>
#include "kxld_demangle.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_state.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"
#define VTABLE_ENTRY_SIZE_32 4
#define VTABLE_HEADER_LEN_32 2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)
#define VTABLE_ENTRY_SIZE_64 8
#define VTABLE_HEADER_LEN_64 2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym,
const KXLDSect *sect, const KXLDSymtab *symtab,
const KXLDRelocator *relocator);
static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
const KXLDSym *sym, const KXLDSymtab *symtab,
const KXLDRelocator *relocator, const KXLDArray *relocs);
static kxld_addr_t get_entry_value(u_char *entry, const KXLDRelocator *relocator)
__attribute__((pure));
#if !KERNEL
static kxld_addr_t swap_entry_value(kxld_addr_t entry_value,
const KXLDRelocator *relocator) __attribute__((const));
#endif
static kern_return_t init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
const KXLDRelocator *relocator);
/* Builds a vtable descriptor from a vtable symbol found in the running
 * kernel's Mach-O image.  A kernel vtable is fully linked, so it must not
 * carry any relocation entries; its slots are resolved purely by value
 * lookup in init_by_entries().  On success the vtable is marked patched.
 */
kern_return_t
kxld_vtable_init_from_kernel_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    size_t demangled_len = 0;
    char *demangled = NULL;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->is_patched = FALSE;
    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);

    /* A fully linked vtable section must have no relocation entries. */
    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled, &demangled_len)));

    rval = init_by_entries(vtable, symtab, relocator);
    require_noerr(rval, finish);

    vtable->is_patched = TRUE;

    rval = KERN_SUCCESS;
finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled) kxld_free(demangled, demangled_len);

    return rval;
}
/* Builds a vtable descriptor from a vtable symbol in a relocatable object
 * file.  An unlinked vtable's slots are identified entirely through the
 * section's relocation entries, so the section must have at least one.
 * The resulting vtable is left unpatched.
 */
kern_return_t
kxld_vtable_init_from_object_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    size_t demangled_len = 0;
    char *demangled = NULL;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->is_patched = FALSE;
    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);

    /* An object file's vtable section must carry relocation entries. */
    require_action(kxld_sect_get_num_relocs(sect) > 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled, &demangled_len)));

    rval = init_by_relocs(vtable, sym, sect, symtab, relocator);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled) kxld_free(demangled, demangled_len);

    return rval;
}
/* Builds a vtable descriptor from a vtable symbol in a final-linked image.
 * The section itself must have no local relocation entries; slots are
 * resolved from a mix of in-image values and the external relocation
 * array in init_by_entries_and_relocs().  The vtable is left unpatched.
 */
kern_return_t
kxld_vtable_init_from_final_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator, const KXLDArray *relocs)
{
    kern_return_t rval = KERN_FAILURE;
    size_t demangled_len = 0;
    char *demangled = NULL;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->is_patched = FALSE;
    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);

    /* A final-linked vtable section must have no section-local relocs. */
    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled, &demangled_len)));

    rval = init_by_entries_and_relocs(vtable, sym, symtab,
        relocator, relocs);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled) kxld_free(demangled, demangled_len);

    return rval;
}
#if KXLD_USER_OR_ILP32
/* Reconstructs a vtable from a 32-bit link-state file image.  Link-state
 * vtables were already patched when the state was written, so each stored
 * entry is just a (name offset, address) pair relative to the file base.
 */
kern_return_t
kxld_vtable_init_from_link_state_32(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry32 *symentry = NULL;
    KXLDVTableEntry *ventry = NULL;
    u_int idx = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->is_patched = TRUE;
    vtable->name = (char *) (file + hdr->nameoff);

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    /* Copy each serialized entry into the vtable's entry array. */
    symentry = (KXLDSymEntry32 *) (file + hdr->vtableoff);
    for (idx = 0; idx < vtable->entries.nitems; ++idx, ++symentry) {
        ventry = kxld_array_get_item(&vtable->entries, idx);
        ventry->patched.name = (char *) (file + symentry->nameoff);
        ventry->patched.addr = symentry->addr;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/* Reconstructs a vtable from a 64-bit link-state file image.  Mirrors the
 * 32-bit variant above: entries were patched at state-generation time and
 * are stored as (name offset, address) pairs relative to the file base.
 */
kern_return_t
kxld_vtable_init_from_link_state_64(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry64 *symentry = NULL;
    KXLDVTableEntry *ventry = NULL;
    u_int idx = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->is_patched = TRUE;
    vtable->name = (char *) (file + hdr->nameoff);

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    /* Copy each serialized entry into the vtable's entry array. */
    symentry = (KXLDSymEntry64 *) (file + hdr->vtableoff);
    for (idx = 0; idx < vtable->entries.nitems; ++idx, ++symentry) {
        ventry = kxld_array_get_item(&vtable->entries, idx);
        ventry->patched.name = (char *) (file + symentry->nameoff);
        ventry->patched.addr = symentry->addr;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
/* Copies src into vtable: the scalar fields are copied directly and the
 * entry array is deep-copied via kxld_array_copy().  Note the name and
 * vtable pointers still alias src's underlying storage.
 */
kern_return_t
kxld_vtable_copy(KXLDVTable *vtable, const KXLDVTable *src)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(src);

    vtable->name = src->name;
    vtable->vtable = src->vtable;
    vtable->is_patched = src->is_patched;

    rval = kxld_array_copy(&vtable->entries, &src->entries);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Initializes a vtable's entry array from the relocation entries of an
 * unlinked object file's vtable section.  Each slot past the vtable header
 * is described by a relocation entry at that slot's section offset; the
 * entries are recorded unpatched (reloc + target symbol).
 *
 * Fix: the reloc-array references had been corrupted by a character-encoding
 * mangle ("§->relocs"); restored to "&sect->relocs".
 */
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym, const KXLDSect *sect,
    const KXLDSymtab *symtab, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *tmpsym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);
    check(relocator);

    /* Skip the vtable header so the base offset points at the first
     * virtual-function slot.
     */
    vtable_base_offset = kxld_sym_get_section_offset(sym, sect);
    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_base_offset += VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_base_offset += VTABLE_HEADER_SIZE_64;
    }

    /* Locate the relocation entry for the first slot. */
    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the run of relocation entries covering the vtable: walk the
     * reloc array backward from the base index, one reloc per slot, until
     * a reloc's address no longer matches the expected slot offset.
     * (NOTE(review): this walk direction implies the section's relocs are
     * ordered by descending address — confirm against kxld_reloc.)
     */
    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;
        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Record each slot's relocation entry and target symbol so the vtable
     * can be patched later.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);
        tmpsym = kxld_reloc_get_symbol(relocator, reloc, sect->data, symtab);

        entry->unpatched.sym = tmpsym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Reads one vtable slot as a 32- or 64-bit value, depending on the
 * relocator's target word size.  The value is returned exactly as stored
 * (no byte swapping — see swap_entry_value for that).
 */
static kxld_addr_t
get_entry_value(u_char *entry, const KXLDRelocator *relocator)
{
    return relocator->is_32_bit ?
        (kxld_addr_t) *(uint32_t *) entry :
        (kxld_addr_t) *(uint64_t *) entry;
}
#if !KERNEL
/* Byte-swaps a vtable slot value at the relocator's target word size.
 * Only needed in the user-space linker, where host and target byte order
 * may differ.
 */
static kxld_addr_t
swap_entry_value(kxld_addr_t entry_value, const KXLDRelocator *relocator)
{
    if (relocator->is_32_bit) {
        return OSSwapInt32((uint32_t) entry_value);
    }

    return OSSwapInt64((uint64_t) entry_value);
}
#endif /* !KERNEL */
/* Initializes a vtable's entry array directly from the slot values stored
 * in the (fully linked) vtable data.  The slot run is NULL-terminated;
 * each nonzero slot value is resolved to a C++ symbol by address lookup,
 * and unresolvable slots are recorded as empty patched entries.
 *
 * Cleanup: removed two dead stores before the fill loop (a stale
 * "entry = base_entry" and a premature "rval = KERN_SUCCESS") and added
 * the check() guards used by every sibling function.  No behavior change.
 */
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_char *base_entry = NULL;
    u_char *entry = NULL;
    kxld_addr_t entry_value = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(symtab);
    check(relocator);

    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_header_size = VTABLE_HEADER_SIZE_64;
    }

    /* Count the entries.  The run is terminated by a zero slot, and zero
     * is zero in either byte order, so no swap is needed while counting.
     */
    base_entry = vtable->vtable + vtable_header_size;
    entry = base_entry;
    entry_value = get_entry_value(entry, relocator);
    while (entry_value) {
        ++nentries;
        entry += vtable_entry_size;
        entry_value = get_entry_value(entry, relocator);
    }

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Resolve each slot's value to a C++ symbol by address. */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        entry = base_entry + (i * vtable_entry_size);
        entry_value = get_entry_value(entry, relocator);
#if !KERNEL
        if (relocator->swap) {
            entry_value = swap_entry_value(entry_value, relocator);
        }
#endif

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);
        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            /* No known C++ symbol at this address; leave the slot empty. */
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Initializes a vtable's entry array from a final-linked image where slots
 * may be resolved two ways: a nonzero in-image slot value is looked up by
 * address in the symbol table, while a zero slot must be covered by an
 * entry in the external relocation array.  Entries are recorded unpatched.
 */
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *sym,
const KXLDSymtab *symtab, const KXLDRelocator *relocator,
const KXLDArray *relocs)
{
kern_return_t rval = KERN_FAILURE;
KXLDReloc *reloc = NULL;
KXLDVTableEntry *tmpentry = NULL;
KXLDSym *tmpsym = NULL;
u_int vtable_entry_size = 0;
u_int vtable_header_size = 0;
u_char *base_entry = NULL;
u_char *entry = NULL;
kxld_addr_t entry_value = 0;
kxld_addr_t base_entry_offset = 0;
kxld_addr_t entry_offset = 0;
u_int nentries = 0;
u_int i = 0;
char *demangled_name1 = NULL;
size_t demangled_length1 = 0;
check(vtable);
check(sym);
check(symtab);
check(relocs);
/* Pick slot and header sizes for the target word size. */
if (relocator->is_32_bit) {
vtable_entry_size = VTABLE_ENTRY_SIZE_32;
vtable_header_size = VTABLE_HEADER_SIZE_32;
} else {
vtable_entry_size = VTABLE_ENTRY_SIZE_64;
vtable_header_size = VTABLE_HEADER_SIZE_64;
}
/* base_entry points at the first slot's data in the image; entry_offset
 * tracks the corresponding address (sym->base_addr + header) used to
 * look up external relocation entries.  Both advance in lock step.
 */
base_entry = vtable->vtable + vtable_header_size;
base_entry_offset = sym->base_addr;
base_entry_offset += vtable_header_size;
entry = base_entry;
entry_value = get_entry_value(entry, relocator);
entry_offset = base_entry_offset;
/* First pass: count the entries.  A slot belongs to the vtable if it
 * holds a nonzero value or has a relocation entry at its offset; the
 * vtable ends at the first slot with neither.
 */
while (1) {
entry_value = get_entry_value(entry, relocator);
if (!entry_value) {
reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
if (!reloc) break;
}
++nentries;
entry += vtable_entry_size;
entry_offset += vtable_entry_size;
}
rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
require_noerr(rval, finish);
/* Second pass: record each slot's symbol (and reloc, when resolved via
 * the relocation array) as an unpatched entry.
 */
entry = base_entry;
entry_value = get_entry_value(entry, relocator);
entry_offset = base_entry_offset;
for (i = 0; i < vtable->entries.nitems; ++i) {
entry_value = get_entry_value(entry, relocator);
if (entry_value) {
/* Nonzero slot: resolve by address lookup; no reloc involved. */
#if !KERNEL
if (relocator->swap) {
entry_value = swap_entry_value(entry_value, relocator);
}
#endif
reloc = NULL;
tmpsym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);
} else {
/* Zero slot: it must have a relocation entry (pass one saw one,
 * so a miss here means the vtable is malformed).
 */
reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
require_action(reloc, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr,
kKxldLogMalformedVTable,
kxld_demangle(vtable->name, &demangled_name1,
&demangled_length1)));
tmpsym = kxld_reloc_get_symbol(relocator, reloc,
NULL, symtab);
}
tmpentry = kxld_array_get_item(&vtable->entries, i);
tmpentry->unpatched.reloc = reloc;
tmpentry->unpatched.sym = tmpsym;
entry += vtable_entry_size;
entry_offset += vtable_entry_size;
}
rval = KERN_SUCCESS;
finish:
return rval;
}
/* Resets a vtable for reuse: clears the scalar fields and empties the
 * entry array (kxld_array_clear keeps the array's storage).
 */
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->name = NULL;
    vtable->vtable = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}
/* Tears a vtable down completely: frees the entry array's storage and
 * zeroes the structure itself.
 */
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}
/* Patches an unpatched child vtable against its (already patched) parent
 * vtable: for every inherited slot whose symbol is unresolved, the slot's
 * relocation is redirected to the symbol named in the parent's slot.
 * Afterward every entry is converted from unpatched to patched form.
 * Returns KERN_SUCCESS if already patched or on successful patching.
 */
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
KXLDSymtab *symtab, boolean_t strict_patching __unused)
{
kern_return_t rval = KERN_FAILURE;
KXLDVTableEntry *child_entry = NULL;
KXLDVTableEntry *parent_entry = NULL;
KXLDSym *sym = NULL;
u_int symindex = 0;
u_int i = 0;
char *demangled_name1 = NULL;
char *demangled_name2 = NULL;
char *demangled_name3 = NULL;
size_t demangled_length1 = 0;
size_t demangled_length2 = 0;
size_t demangled_length3 = 0;
check(vtable);
check(super_vtable);
/* Nothing to do if this vtable was already patched. */
require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);
/* A child vtable must contain at least its parent's slots. */
require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));
/* Walk the inherited slots, patching where needed. */
for (i = 0; i < super_vtable->entries.nitems; ++i) {
child_entry = kxld_array_get_item(&vtable->entries, i);
parent_entry = kxld_array_get_item(&super_vtable->entries, i);
/* Skip slots that need no patching: no symbol at all, no parent
 * name to patch from, already defined locally, pure virtual, or
 * already naming the same symbol as the parent's slot.
 */
if (!child_entry->unpatched.sym) continue;
if (!parent_entry->patched.name) continue;
if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;
if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;
if (streq(child_entry->unpatched.sym->name,
parent_entry->patched.name))
{
continue;
}
/* A pad-slot name in the parent means the parent class is out of
 * date relative to this child — refuse to patch.
 */
require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
finish, rval=KERN_FAILURE;
kxld_log(kKxldLogPatching, kKxldLogErr,
kKxldLogParentOutOfDate,
kxld_demangle(super_vtable->name, &demangled_name1,
&demangled_length1),
kxld_demangle(vtable->name, &demangled_name2,
&demangled_length2)));
#if KXLD_USER_OR_STRICT_PATCHING
/* Strict mode: leave undefined symbols alone when they belong to
 * this vtable's own class (their name starts with the function
 * prefix derived from the class name).
 */
if (strict_patching && !kxld_sym_is_defined(child_entry->unpatched.sym))
{
char class_name[KXLD_MAX_NAME_LEN];
char function_prefix[KXLD_MAX_NAME_LEN];
u_long function_prefix_len = 0;
rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
class_name, sizeof(class_name));
require_noerr(rval, finish);
function_prefix_len =
kxld_sym_get_function_prefix_from_class_name(class_name,
function_prefix, sizeof(function_prefix));
require(function_prefix_len, finish);
if (!strncmp(child_entry->unpatched.sym->name,
function_prefix, function_prefix_len))
{
continue;
}
}
#endif
/* Look up the parent's symbol, creating it in the symbol table if
 * it does not exist yet.
 */
sym = kxld_symtab_get_symbol_by_name(symtab, parent_entry->patched.name);
if (!sym) {
rval = kxld_symtab_add_symbol(symtab, parent_entry->patched.name,
parent_entry->patched.addr, &sym);
require_noerr(rval, finish);
}
require_action(sym, finish, rval=KERN_FAILURE);
/* Redirect the slot's relocation entry at the parent's symbol. */
rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
require_noerr(rval, finish);
rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
require_noerr(rval, finish);
kxld_log(kKxldLogPatching, kKxldLogDetail,
"In vtable '%s', patching '%s' with '%s'.",
kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
kxld_demangle(child_entry->unpatched.sym->name,
&demangled_name2, &demangled_length2),
kxld_demangle(sym->name, &demangled_name3, &demangled_length3));
kxld_sym_patch(child_entry->unpatched.sym);
child_entry->unpatched.sym = sym;
}
/* All slots resolved; convert every entry to its patched form. */
for (i = 0; i < vtable->entries.nitems; ++i) {
char *name;
kxld_addr_t addr;
child_entry = kxld_array_get_item(&vtable->entries, i);
if (child_entry->unpatched.sym) {
name = child_entry->unpatched.sym->name;
addr = child_entry->unpatched.sym->link_addr;
} else {
name = NULL;
addr = 0;
}
child_entry->patched.name = name;
child_entry->patched.addr = addr;
}
vtable->is_patched = TRUE;
rval = KERN_SUCCESS;
finish:
if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
if (demangled_name3) kxld_free(demangled_name3, demangled_length3);
return rval;
}