/* IOBufferMemoryDescriptor.cpp */
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS
volatile ppnum_t gIOHighestAllocatedPage;
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
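
/*
 * The address- and range-based initializers inherited from
 * IOGeneralMemoryDescriptor do not apply to a buffer descriptor that
 * allocates and owns its own backing store, so they are all disabled.
 */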
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t  kr;
    task_t         mapTask = NULL;
    vm_map_t       vmmap = NULL;
    addr64_t       lastIOAddr;
    IOAddressRange range;
    IOOptionBits   iomdOptions = kIOMemoryTypeVirtual64;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;
    range.address = 0;
    range.length  = 0;
    _ranges.v64   = &range;
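
    // Grab the direction and the Auto Prepare bits from the Buffer MD options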
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
        alignment = page_size;

    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;

    _alignment = alignment;

    if (((inTask != kernel_task) && !(options & kIOMemoryPageable)) ||
        (physicalMask && (options & kIOMapCacheMask)))
        return false;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;
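
    // Translate the requested cache mode into memory entry flags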
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;

        if (IOMapper::gSystem)
            // with a system mapper, I/O addresses stay below 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual64;
            iomdOptions |= kIOMemoryTypePhysical64;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            mapTask = inTask;
            inTask = 0;
        }
        else
        {
            vmmap = kernel_map;

            // Buffers shouldn't auto prepare, they should be prepared
            // explicitly, but it was never enforced
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);

            if (!_buffer)
                return false;
        }
    }

    if ((kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
        && (options & (kIOMemoryPageable | kIOMapCacheMask)))
    {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page_32(capacity)))
        {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return false;

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable)
        {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    range.address = (mach_vm_address_t) _buffer;
    range.length  = capacity;

    if (!super::initWithOptions(&range, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                kIOMDGetCharacteristics,
                &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

    if (mapTask)
    {
        if (!reserved)
        {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return false;
        }
        reserved->map = map(mapTask, 0, kIOMapAnywhere, 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return false;
        }
        release();          // map took a retain on this
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(capacity);

    return true;
}
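
/*
 * inTaskWithOptions:
 *
 * Creates a new IOBufferMemoryDescriptor in the given task with the given
 * options, capacity and alignment.  If the first init fails with
 * _physSegCount set (the "retry" flag above), a fresh instance is tried
 * once more, since a failed descriptor cannot be re-initialized.
 *
 * Typical caller usage, as a sketch only (not part of this file):
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             kernel_task, kIODirectionOutIn, 4096, 4096);
 *     if (buf) {
 *         void * p = buf->getBytesNoCopy();    // fill in or read data
 *         buf->release();                      // drop the ref when done
 *     }
 */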
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
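
/*
 * inTaskWithPhysicalMask:
 *
 * As inTaskWithOptions, but the buffer's physical pages are constrained to
 * addresses matching physicalMask; the same one-shot retry applies.
 */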
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithOptions(options, capacity, alignment, kernel_task));
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}
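
/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */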
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
            inDirection | kIOMemoryUnshared
            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
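
/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */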
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
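
/*
 * free:
 *
 * Free resources
 */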
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits      flags     = _flags;
    IOOptionBits      options   = _options;
    vm_size_t         size      = _capacity;
    void *            buffer    = _buffer;
    mach_vm_address_t source    = (_ranges.v) ? _ranges.v64->address : 0;
    IOMemoryMap *     map       = 0;
    vm_offset_t       alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page_32(size);
#endif
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
            IOFreePhysical(source, size);
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
}
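
/*
 * getCapacity:
 *
 * Get the buffer capacity
 */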
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
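
/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */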
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
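
/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */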
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
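
/*
 * appendBytes:
 *
 * Add some data to the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */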
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
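
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */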
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}
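
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */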
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);