// IOBufferMemoryDescriptor.cpp
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include "IOKitKernelInternal.h"
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
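/*
 * The inherited initializers that wrap caller-supplied memory make no
 * sense for a descriptor that owns its backing store, so they are all
 * disabled here and simply fail.
 */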
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}
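/*
 * initWithOptions:
 *
 * Core initializer.  Creates either a pageable buffer backed by a named
 * memory entry (kIOMemoryPageable) or a wired-down kernel allocation,
 * then hands the resulting single range to the superclass.
 */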
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the auto-prepare bits from the options.
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    // Buffers shared with other tasks must be at least page aligned.
    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    // Only pageable buffers may be created for a non-kernel task.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // The named entry must be created before any pages are allocated.

        // Set the flags for entry + object create.
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // Set the memory entry cache mode.
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry(vmmap,
                                    &size, 0,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return false;

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            // Keep a reference to the client task's map for the lifetime
            // of the buffer.
            if (!reserved) {
                reserved = IONew(ExpansionData, 1);
                if (!reserved)
                    return false;
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // Non-pageable buffers have always been auto-prepared; explicit
        // prepare() was never enforced for them.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }
    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;

        if (vmmap)
        {
            // Map the named entry into the target map and record the
            // address the buffer ended up at.
            kr = doMap(vmmap, (IOVirtualAddress *) &_buffer,
                       kIOMapAnywhere, 0, round_page_32(capacity));
            if (KERN_SUCCESS != kr)
            {
                _buffer = 0;
                return false;
            }
            _singleRange.v.address = (vm_address_t) _buffer;
        }
    }

    setLength(capacity);

    return true;
}
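/*
 * inTaskWithOptions:
 *
 * Factory method: returns a new descriptor allocated on behalf of the
 * given task, or 0 if initialization fails.
 */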
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                  task_t       inTask,
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
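// Convenience form of initWithOptions that targets the kernel task.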
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return initWithOptions(options, capacity, alignment, kernel_task);
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
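/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 *
 * Typical driver usage (a sketch only; the direction and contiguity
 * depend on the device):
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, false);
 *     if (buf) {
 *         bzero(buf->getBytesNoCopy(), buf->getCapacity());
 *         // ... perform the transfer ...
 *         buf->release();
 *     }
 */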
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 );
}
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // Start with an empty buffer, then copy the caller's data in.
    setLength(0);
    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
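/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */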
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
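/*
 * free:
 *
 * Release the buffer and any task map reference taken at initialization.
 */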
void IOBufferMemoryDescriptor::free()
{
    // Cache the relevant state on the stack; super::free() destroys it.
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete(reserved, ExpansionData, 1);
    }

    /* super::free may unwire - deallocate the buffer afterwards. */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        // Free with the allocator matching the allocation path taken
        // in initWithOptions.
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}
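/*
 * getCapacity:
 *
 * Get the buffer capacity.
 */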
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
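/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer; there is no need to create a new descriptor when an existing
 * one can be reused, even for different transfer sizes.  Note that the
 * specified length must not exceed the capacity of the buffer.
 */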
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
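/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */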
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    // Never copy past the current capacity.
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
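/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer.
 */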
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}
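/*
 * getBytesNoCopy:
 *
 * Return the virtual address at an offset from the beginning of the
 * buffer, or 0 if the requested range does not fall within the current
 * length.
 */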
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);