#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <ipc/ipc_port.h>
#include <kern/page_decrypt.h>
/* Descriptor tables for the commpage routine variants (defined elsewhere,
 * presumably in assembly — each entry names a candidate implementation). */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];
extern commpage_descriptor sigdata_descriptor;
extern commpage_descriptor *ba_descriptors[];
/* Submaps the 32-bit and 64-bit commpages are mapped into. */
extern vm_map_t com_region_map32; extern vm_map_t com_region_map64;
/* Kernel addresses of the populated commpages, and the capability bits
 * published at _COMM_PAGE_CPU_CAPABILITIES. */
char *commPagePtr32 = NULL; char *commPagePtr64 = NULL; int _cpu_capabilities = 0;
int noVMX = 0; /* when nonzero, VMX bits (0x101) are left out of the legacy swapped capability word */
void* dsmos_blobs[3]; /* commpage addresses recorded for external use (see dsmos_blob_count) */
int dsmos_blob_count = 0;
/* Transient state while one commpage is being populated:
 * next       - first kernel address past the last store (overlap check)
 * cur_routine/matched - routine-variant matching state
 * commPagePtr/commPageBaseOffset - current page and its runtime base */
static uintptr_t next; static int cur_routine; static int matched;
static char *commPagePtr; static size_t commPageBaseOffset;
/*
 * Allocate and wire kernel memory for a commpage, then map it read-only
 * at offset 0 of the given submap through a named memory entry so every
 * task sharing the submap sees the same physical page.
 * Returns the kernel (writable) address of the page; panics on failure.
 */
static void*
commpage_allocate(
vm_map_t submap, size_t area_used ) {
vm_offset_t kernel_addr; vm_offset_t zero = 0; /* mapped at offset 0 of submap */
vm_size_t size = area_used; vm_map_entry_t entry;
ipc_port_t handle;
if (submap == NULL)
panic("commpage submap is null");
/* allocate kernel memory, then wire it so it is never paged out */
if (vm_allocate(kernel_map,&kernel_addr,area_used,VM_FLAGS_ANYWHERE))
panic("cannot allocate commpage");
if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))
panic("cannot wire commpage");
/* locate the map entry so the backing object's copy strategy can be set */
if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
panic("cannot find commpage entry");
/* COPY_NONE: mappings share the pages rather than copy-on-write them */
entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
/* create a named entry for the memory and map it read-only into the submap */
if (mach_make_memory_entry( kernel_map, &size, kernel_addr, VM_PROT_DEFAULT, &handle, NULL )) panic("cannot make entry for commpage");
if (vm_map_64( submap, &zero, area_used, 0, VM_FLAGS_FIXED, handle, 0, FALSE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE )) panic("cannot map commpage");
ipc_port_release(handle); /* the submap mapping keeps the object referenced */
return (void*) kernel_addr; }
/*
 * Translate a commpage runtime address (as seen by user processes)
 * into a kernel pointer within the page currently being populated.
 */
static void*
commpage_addr_of(
int addr_at_runtime )
{
	uintptr_t delta = (uintptr_t)addr_at_runtime - commPageBaseOffset;

	return (void *)((uintptr_t)commPagePtr + delta);
}
/*
 * Return the number of CPUs to advertise, clamped to fit the 8-bit
 * field in the capability word.  Panics if the machine layer has not
 * yet determined the CPU count.
 */
static int
commpage_cpus( void )
{
	int count = ml_get_max_cpus();

	if (count == 0)
		panic("commpage cpus==0");

	return (count > 0xFF) ? 0xFF : count;
}
/*
 * Compute the processor capability bits published to user processes and
 * cache them in _cpu_capabilities: vector-unit features, cache line
 * size, CPU count, and 64-bit support.
 */
static void
commpage_init_cpu_capabilities( void )
{
int bits;
int cpus;
ml_cpu_info_t cpu_info;
bits = 0;
ml_cpu_get_info(&cpu_info);
/* Vector-unit levels are cumulative: each case deliberately falls
 * through so a higher level also sets every lower-level bit. */
switch (cpu_info.vector_unit) {
case 6:
bits |= kHasSupplementalSSE3;
/* fall thru */
case 5:
bits |= kHasSSE3;
/* fall thru */
case 4:
bits |= kHasSSE2;
/* fall thru */
case 3:
bits |= kHasSSE;
/* fall thru */
case 2:
bits |= kHasMMX;
default:
break;
}
/* exactly one cache-size bit is set (none if the size is unrecognized) */
switch (cpu_info.cache_line_size) {
case 128:
bits |= kCache128;
break;
case 64:
bits |= kCache64;
break;
case 32:
bits |= kCache32;
break;
default:
break;
}
cpus = commpage_cpus(); /* already clamped to 0xFF */
if (cpus == 1)
bits |= kUP;
bits |= (cpus << kNumCPUsShift);
bits |= kFastThreadLocalStorage;
if (cpu_mode_is64bit()) bits |= k64Bit;
_cpu_capabilities = bits; }
/*
 * Return the capability bits computed by
 * commpage_init_cpu_capabilities().
 */
int
_get_cpu_capabilities()
{
	int caps = _cpu_capabilities;
	return caps;
}
/*
 * Copy "length" bytes from "source" into the commpage at runtime
 * address "address".  Stores must proceed in ascending address order;
 * "next" tracks the first kernel address past the previous store and a
 * store that would back up or overlap causes a panic.
 */
static void
commpage_stuff(
	int		address,
	const void	*source,
	int		length )
{
	void *dest = commpage_addr_of(address);

	if ((uintptr_t)dest < next)
		/* Fix: the original passed a pointer and a uintptr_t straight
		 * to %x conversions — a format/argument mismatch (undefined
		 * on LP64).  Cast to integers with matching %lx conversions. */
		panic("commpage overlap at address 0x%x, 0x%lx < 0x%lx",
		      address, (unsigned long)(uintptr_t)dest, (unsigned long)next);

	bcopy(source, dest, length);

	next = ((uintptr_t)dest + length);
}
/*
 * Write a byte-swapped copy of a 2-, 4-, or 8-byte value into the
 * legacy area of the commpage, which sits _COMM_PAGE_SIGS_OFFSET past
 * the primary location.  Does nothing unless "legacy" is set, and
 * silently ignores other lengths.
 */
static void
commpage_stuff_swap(
	int	address,
	void	*source,
	int	length,
	int	legacy )
{
	char	*swapped_dest;

	if (!legacy)
		return;

	swapped_dest = (char *)commpage_addr_of(address) + _COMM_PAGE_SIGS_OFFSET;

	if (length == 2)
		OSWriteSwapInt16(swapped_dest, 0, *(uint16_t *)source);
	else if (length == 4)
		OSWriteSwapInt32(swapped_dest, 0, *(uint32_t *)source);
	else if (length == 8)
		OSWriteSwapInt64(swapped_dest, 0, *(uint64_t *)source);
}
/*
 * Store a value into the native commpage area and, when "legacy" is
 * set, a byte-swapped copy into the legacy area as well.
 */
static void
commpage_stuff2(
	int	address,
	void	*source,
	int	length,
	int	legacy )
{
	/* swapped copy first so the ascending-address check in
	 * commpage_stuff() sees the native store last, as before */
	commpage_stuff_swap(address, source, length, legacy);
	commpage_stuff(address, source, length);
}
/*
 * Consider one descriptor as the implementation for its commpage
 * routine address.  Descriptors for the same address arrive
 * consecutively; exactly one must match the CPU's capabilities.  The
 * matching one is copied into the commpage; zero or multiple matches
 * for an address cause a panic.
 */
static void
commpage_stuff_routine(
	commpage_descriptor	*rd )
{
	int	have_must;
	int	have_cant;

	/* starting a new routine address: the previous one must have matched */
	if (rd->commpage_address != cur_routine) {
		if ((cur_routine!=0) && (matched==0))
			panic("commpage no match for last, next address %08x", rd->commpage_address);
		cur_routine = rd->commpage_address;
		matched = 0;
	}

	have_must = _cpu_capabilities & rd->musthave;
	have_cant = _cpu_capabilities & rd->canthave;

	if ((have_must != rd->musthave) || (have_cant != 0))
		return;		/* this variant does not fit this CPU */

	if (matched)
		panic("commpage multiple matches for address %08x", rd->commpage_address);
	matched = 1;

	commpage_stuff(rd->commpage_address, rd->code_address, rd->code_length);
}
/*
 * Populate one commpage.  Allocates and maps the page, stores the
 * signature, version, capability words, cache line size and (legacy
 * only) floating-point constants, then copies in the best-matching
 * variant of each routine.
 *
 * submap            - submap to map the commpage into
 * kernAddressPtr    - out: kernel address of the page
 * area_used         - size of the commpage area
 * base_offset       - runtime base address the commpage symbols assume
 * commpage_routines - NULL-terminated list of descriptor tables
 * legacy            - TRUE for the 32-bit page; also fills the
 *                     byte-swapped area at _COMM_PAGE_SIGS_OFFSET
 * signature         - ASCII tag stored at _COMM_PAGE_SIGNATURE
 */
static void
commpage_populate_one(
vm_map_t submap, char ** kernAddressPtr, size_t area_used, size_t base_offset, commpage_descriptor** commpage_routines, boolean_t legacy, const char* signature ) {
short c2;
static double two52 = 1048576.0 * 1048576.0 * 4096.0; static double ten6 = 1000000.0; commpage_descriptor **rd;
short version = _COMM_PAGE_THIS_VERSION;
int swapcaps;
next = (uintptr_t) NULL; /* reset the ascending-address overlap check */
cur_routine = 0;
commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used );
*kernAddressPtr = commPagePtr; commPageBaseOffset = base_offset;
commpage_stuff(_COMM_PAGE_SIGNATURE,signature,strlen(signature));
commpage_stuff2(_COMM_PAGE_VERSION,&version,sizeof(short),legacy);
commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));
/* Capability word for the byte-swapped (legacy) area.
 * NOTE(review): 0x44 and 0x101 look like capability bits for the
 * architecture emulated by translated processes — confirm against the
 * corresponding cpu_capabilities definitions. */
swapcaps = 0x44;
if ( _cpu_capabilities & kUP )
swapcaps |= (kUP + (1 << kNumCPUsShift));
else
swapcaps |= 2 << kNumCPUsShift; /* any MP system is reported as 2 CPUs here */
if ( ! noVMX )
swapcaps |= 0x101;
commpage_stuff_swap(_COMM_PAGE_CPU_CAPABILITIES, &swapcaps, sizeof(int), legacy);
c2 = 32; /* legacy (swapped) area always reports a 32-byte cache line */
commpage_stuff_swap(_COMM_PAGE_CACHE_LINESIZE,&c2,2,legacy);
/* native area gets the real cache line size */
if (_cpu_capabilities & kCache32)
c2 = 32;
else if (_cpu_capabilities & kCache64)
c2 = 64;
else if (_cpu_capabilities & kCache128)
c2 = 128;
commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);
if ( legacy ) {
/* floating-point constants used only by the legacy page */
commpage_stuff2(_COMM_PAGE_2_TO_52,&two52,8,legacy);
commpage_stuff2(_COMM_PAGE_10_TO_6,&ten6,8,legacy);
}
/* copy in the matching variant of each commpage routine */
for( rd = commpage_routines; *rd != NULL ; rd++ )
commpage_stuff_routine(*rd);
if (!matched)
panic("commpage no match on last routine");
/* NOTE(review): %08x with a uintptr_t is fine on a 32-bit kernel but a
 * format mismatch on LP64 — would need casts there. */
if (next > (uintptr_t)_COMM_PAGE_END)
panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%08x", next, (uintptr_t)commPagePtr);
if ( legacy ) {
/* these descriptors are placed outside the sequential region covered
 * by "next", so the overlap check is reset before each group */
next = (uintptr_t) NULL;
for( rd = ba_descriptors; *rd != NULL ; rd++ )
commpage_stuff_routine(*rd);
next = (uintptr_t) NULL;
commpage_stuff_routine(&sigdata_descriptor);
}
/* record the system-integrity area address for external consumers */
dsmos_blobs[dsmos_blob_count++] =
commpage_addr_of( _COMM_PAGE_SYSTEM_INTEGRITY );
}
/*
 * Fill in the 32-bit commpage and, when the CPU supports 64-bit mode,
 * the 64-bit commpage; enter both into their pmaps and initialize the
 * nanotime area.  Called once during startup.
 */
void
commpage_populate( void )
{
commpage_init_cpu_capabilities();
commpage_populate_one( com_region_map32,
&commPagePtr32,
_COMM_PAGE32_AREA_USED,
_COMM_PAGE32_BASE_ADDRESS,
commpage_32_routines,
TRUE, /* legacy: also fill the byte-swapped area */
"commpage 32-bit");
pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
_COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
if (_cpu_capabilities & k64Bit) {
commpage_populate_one( com_region_map64,
&commPagePtr64,
_COMM_PAGE64_AREA_USED,
/* NOTE(review): the 32-bit start address appears intentional here —
 * presumably 64-bit commpage symbols are expressed relative to the
 * 32-bit placement; confirm against the _COMM_PAGE64 definitions. */
_COMM_PAGE32_START_ADDRESS,
commpage_64_routines,
FALSE, /* no legacy (swapped) area on the 64-bit page */
"commpage 64-bit");
pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
_COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
}
rtc_nanotime_init_commpage();
}